author    Maarten Lankhorst <maarten.lankhorst@linux.intel.com>    2023-03-30 08:53:12 +0200
committer Maarten Lankhorst <maarten.lankhorst@linux.intel.com>    2023-03-30 08:53:12 +0200
commit    8ba264f418f734aade3a77086bb1d51d0e2723ce (patch)
tree      ca6749e28544c393305b8606c0230ac50126041a /drivers
parent    fbb3b3500f76ec8b741bd2d0e761ca3e856ad924 (diff)
parent    82bbec189ab34873688484cd14189a5392946fbb (diff)
Merge remote-tracking branch 'drm/drm-next' into drm-misc-next
Backmerge to get rc4.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/accel/habanalabs/common/command_submission.c130
-rw-r--r--drivers/accel/habanalabs/common/debugfs.c142
-rw-r--r--drivers/accel/habanalabs/common/decoder.c22
-rw-r--r--drivers/accel/habanalabs/common/device.c315
-rw-r--r--drivers/accel/habanalabs/common/firmware_if.c2
-rw-r--r--drivers/accel/habanalabs/common/habanalabs.h129
-rw-r--r--drivers/accel/habanalabs/common/habanalabs_drv.c14
-rw-r--r--drivers/accel/habanalabs/common/habanalabs_ioctl.c60
-rw-r--r--drivers/accel/habanalabs/common/irq.c73
-rw-r--r--drivers/accel/habanalabs/common/memory.c133
-rw-r--r--drivers/accel/habanalabs/common/memory_mgr.c15
-rw-r--r--drivers/accel/habanalabs/common/mmu/mmu.c6
-rw-r--r--drivers/accel/habanalabs/common/security.c6
-rw-r--r--drivers/accel/habanalabs/common/security.h2
-rw-r--r--drivers/accel/habanalabs/gaudi/gaudi.c65
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2.c1543
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2P.h9
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c2
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2_masks.h3
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2_security.c1
-rw-r--r--drivers/accel/habanalabs/goya/goya.c21
-rw-r--r--drivers/accel/habanalabs/include/common/cpucp_if.h9
-rw-r--r--drivers/accel/habanalabs/include/common/hl_boot_if.h47
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h5
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/gaudi2.h2
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/gaudi2_async_events.h4
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h5294
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/gaudi2_fw_if.h5
-rw-r--r--drivers/acpi/pptt.c5
-rw-r--r--drivers/acpi/processor_driver.c12
-rw-r--r--drivers/acpi/processor_thermal.c14
-rw-r--r--drivers/acpi/resource.c7
-rw-r--r--drivers/acpi/video_detect.c15
-rw-r--r--drivers/acpi/x86/utils.c45
-rw-r--r--drivers/ata/pata_parport/pata_parport.c30
-rw-r--r--drivers/atm/idt77252.c11
-rw-r--r--drivers/block/loop.c25
-rw-r--r--drivers/block/null_blk/main.c31
-rw-r--r--drivers/block/sunvdc.c2
-rw-r--r--drivers/block/ublk_drv.c34
-rw-r--r--drivers/bluetooth/btintel.c51
-rw-r--r--drivers/bluetooth/btintel.h7
-rw-r--r--drivers/bluetooth/btqcomsmd.c17
-rw-r--r--drivers/bluetooth/btsdio.c1
-rw-r--r--drivers/bluetooth/btusb.c10
-rw-r--r--drivers/bus/imx-weim.c2
-rw-r--r--drivers/char/random.c2
-rw-r--r--drivers/char/tpm/eventlog/acpi.c6
-rw-r--r--drivers/char/tpm/tpm-chip.c60
-rw-r--r--drivers/char/tpm/tpm.h73
-rw-r--r--drivers/clk/Kconfig2
-rw-r--r--drivers/clk/bcm/clk-bcm2835-aux.c1
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c1
-rw-r--r--drivers/clk/clk-fixed-mmio.c1
-rw-r--r--drivers/clk/clk-fsl-sai.c1
-rw-r--r--drivers/clk/clk-k210.c2
-rw-r--r--drivers/clk/hisilicon/clk-hi3559a.c1
-rw-r--r--drivers/clk/microchip/clk-mpfs-ccc.c1
-rw-r--r--drivers/cpuidle/cpuidle-psci-domain.c3
-rw-r--r--drivers/dma-buf/dma-fence-array.c11
-rw-r--r--drivers/dma-buf/dma-fence-chain.c12
-rw-r--r--drivers/dma-buf/dma-fence.c59
-rw-r--r--drivers/dma-buf/dma-resv.c22
-rw-r--r--drivers/firmware/arm_scmi/bus.c3
-rw-r--r--drivers/firmware/arm_scmi/driver.c14
-rw-r--r--drivers/firmware/arm_scmi/mailbox.c37
-rw-r--r--drivers/firmware/efi/earlycon.c16
-rw-r--r--drivers/firmware/efi/efi-init.c3
-rw-r--r--drivers/firmware/efi/libstub/Makefile.zboot2
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c5
-rw-r--r--drivers/firmware/efi/libstub/arm64.c39
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-entry.c11
-rw-r--r--drivers/firmware/efi/libstub/efi-stub.c5
-rw-r--r--drivers/firmware/efi/libstub/efistub.h43
-rw-r--r--drivers/firmware/efi/libstub/randomalloc.c1
-rw-r--r--drivers/firmware/efi/libstub/screen_info.c9
-rw-r--r--drivers/firmware/efi/libstub/smbios.c15
-rw-r--r--drivers/firmware/efi/libstub/zboot-header.S2
-rw-r--r--drivers/firmware/efi/libstub/zboot.c5
-rw-r--r--drivers/firmware/efi/sysfb_efi.c13
-rw-r--r--drivers/firmware/qcom_scm.c2
-rw-r--r--drivers/firmware/sysfb.c4
-rw-r--r--drivers/firmware/sysfb_simplefb.c2
-rw-r--r--drivers/firmware/xilinx/zynqmp.c2
-rw-r--r--drivers/gpio/gpiolib-acpi.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Kconfig1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c88
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c99
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c72
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mca_v3_0.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mca_v3_0.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c86
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c1967
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h (renamed from drivers/gpu/drm/amd/display/dc/core/dc_link.c)13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c81
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c183
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_10.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c37
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c71
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c33
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c75
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c10
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig14
-rw-r--r--drivers/gpu/drm/amd/display/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c227
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c26
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h14
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c33
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c69
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c88
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h15
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c149
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile7
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c79
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c377
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c411
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h588
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h173
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dsc.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h577
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h117
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c62
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h64
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h62
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c119
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h71
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c299
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c299
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c86
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h54
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h52
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link.h351
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c65
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c123
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.c78
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.h23
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.c274
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c68
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c176
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h52
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c51
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h22
-rw-r--r--drivers/gpu/drm/amd/display/include/hdcp_msg_types.h (renamed from drivers/gpu/drm/amd/display/include/hdcp_types.h)0
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h26
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c12
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h2
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h1
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c7
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h54
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h54
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_offset.h219
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_sh_mask.h663
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_offset.h456
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_sh_mask.h674
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_offset.h1109
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_sh_mask.h3276
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h4
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c28
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c79
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c30
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h12
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h141
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h212
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h95
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c43
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c23
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c11
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c79
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c2069
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h32
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c4
-rw-r--r--drivers/gpu/drm/bridge/Kconfig12
-rw-r--r--drivers/gpu/drm/bridge/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c4
-rw-r--r--drivers/gpu/drm/bridge/samsung-dsim.c1967
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c37
-rw-r--r--drivers/gpu/drm/drm_edid.c2
-rw-r--r--drivers/gpu/drm/drm_gem.c9
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c9
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c13
-rw-r--r--drivers/gpu/drm/drm_vblank.c53
-rw-r--r--drivers/gpu/drm/exynos/Kconfig1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c1813
-rw-r--r--drivers/gpu/drm/i915/Makefile6
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c53
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c21
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.c37
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.h2
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.c4047
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.h21
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c316
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c116
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c92
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c161
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c266
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h27
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c49
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c41
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c119
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c48
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy_regs.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c46
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c230
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c760
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_core.h34
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c592
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c88
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c134
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reg_defs.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.c81
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h27
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c429
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.h44
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c128
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c50
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c84
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c48
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c29
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c51
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c165
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c41
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c157
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c30
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c173
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.c831
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.h26
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c79
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c338
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds_regs.h65
-rw-r--r--drivers/gpu/drm/i915/display/intel_mg_phy_regs.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c59
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c40
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.c72
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c508
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h19
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c62
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c186
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite_uapi.c127
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite_uapi.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c322
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.c96
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc_regs.h461
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.c408
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.h37
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm_types.h (renamed from drivers/gpu/drm/i915/intel_pm_types.h)8
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c9
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c309
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.h7
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c160
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c18
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c19
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c137
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gsc.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c170
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_mcr.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c28
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_print.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h9
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c29
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c218
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_gt_pm.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_llc.c1
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rps.c10
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_tlb.c388
-rw-r--r--drivers/gpu/drm/i915/gt/sysfs_engines.c72
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c9
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c28
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c109
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h61
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c78
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c21
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_print.h3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c21
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c61
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c140
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c44
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c56
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c116
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc.c42
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c23
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c11
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c16
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c18
-rw-r--r--drivers/gpu/drm/i915/i915_active.c28
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c1
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c58
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h18
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h1
-rw-r--r--drivers/gpu/drm/i915/i915_hwmon.c51
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c142
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c1
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h997
-rw-r--r--drivers/gpu/drm/i915/i915_reg_defs.h31
-rw-r--r--drivers/gpu/drm/i915/i915_request.c1
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c1
-rw-r--r--drivers/gpu/drm/i915/i915_ttm_buddy_manager.c7
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c28
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h1
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c77
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.c35
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c4112
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h16
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c2
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c19
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.c65
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.h2
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h15
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h3
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_huc.c2
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_pm.c4
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_pm.h6
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_session.c8
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_session.h5
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_tee.c46
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_types.h3
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c134
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.c152
-rw-r--r--drivers/gpu/drm/imx/lcdc/imx-lcdc.c1
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c13
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c2
-rw-r--r--drivers/gpu/drm/meson/meson_vpp.c2
-rw-r--r--drivers/gpu/drm/msm/Kconfig1
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c166
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c11
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c4
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c38
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c16
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_regs.h26
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c176
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.h12
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c46
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c4
-rw-r--r--drivers/gpu/host1x/dev.c5
-rw-r--r--drivers/hid/hid-core.c32
-rw-r--r--drivers/hid/hid-cp2112.c1
-rw-r--r--drivers/hid/hid-logitech-hidpp.c2
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c9
-rw-r--r--drivers/hid/uhid.c1
-rw-r--r--drivers/hwmon/adt7475.c8
-rw-r--r--drivers/hwmon/hwmon.c7
-rw-r--r--drivers/hwmon/ina3221.c2
-rw-r--r--drivers/hwmon/it87.c4
-rw-r--r--drivers/hwmon/ltc2992.c1
-rw-r--r--drivers/hwmon/peci/cputemp.c8
-rw-r--r--drivers/hwmon/pmbus/adm1266.c1
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c75
-rw-r--r--drivers/hwmon/tmp513.c2
-rw-r--r--drivers/hwmon/xgene-hwmon.c15
-rw-r--r--drivers/i2c/busses/i2c-hisi.c13
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c6
-rw-r--r--drivers/i2c/busses/i2c-mxs.c18
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c3
-rw-r--r--drivers/i2c/i2c-core-base.c13
-rw-r--r--drivers/i2c/i2c-dev.c24
-rw-r--r--drivers/i2c/i2c-slave-eeprom.c2
-rw-r--r--drivers/i2c/i2c-slave-testunit.c2
-rw-r--r--drivers/i2c/i2c-smbus.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-ltc4306.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c2
-rw-r--r--drivers/interconnect/core.c68
-rw-r--r--drivers/interconnect/imx/imx.c20
-rw-r--r--drivers/interconnect/qcom/icc-rpm.c29
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c30
-rw-r--r--drivers/interconnect/qcom/msm8974.c20
-rw-r--r--drivers/interconnect/qcom/osm-l3.c16
-rw-r--r--drivers/interconnect/qcom/qcm2290.c4
-rw-r--r--drivers/interconnect/qcom/sm8450.c98
-rw-r--r--drivers/interconnect/qcom/sm8550.c99
-rw-r--r--drivers/interconnect/samsung/exynos.c30
-rw-r--r--drivers/md/Kconfig4
-rw-r--r--drivers/md/dm-crypt.c16
-rw-r--r--drivers/md/dm-stats.c7
-rw-r--r--drivers/md/dm-stats.h2
-rw-r--r--drivers/md/dm-thin.c2
-rw-r--r--drivers/md/dm.c10
-rw-r--r--drivers/md/md.c17
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c2
-rw-r--r--drivers/media/i2c/ov2685.c5
-rw-r--r--drivers/media/i2c/ov5695.c5
-rw-r--r--drivers/memory/tegra/mc.c16
-rw-r--r--drivers/memory/tegra/tegra124-emc.c12
-rw-r--r--drivers/memory/tegra/tegra20-emc.c12
-rw-r--r--drivers/memory/tegra/tegra30-emc.c12
-rw-r--r--drivers/misc/ad525x_dpot-i2c.c6
-rw-r--r--drivers/misc/mei/client.c4
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c105
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.h354
-rw-r--r--drivers/misc/mei/pci-me.c20
-rw-r--r--drivers/mmc/host/dw_mmc-starfive.c2
-rw-r--r--drivers/mmc/host/sdhci_am654.c2
-rw-r--r--drivers/mtd/maps/pismo.c5
-rw-r--r--drivers/mtd/ubi/block.c5
-rw-r--r--drivers/net/bonding/bond_main.c23
-rw-r--r--drivers/net/can/cc770/cc770_platform.c12
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c2
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c2
-rw-r--r--drivers/net/dsa/mt7530.c122
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c16
-rw-r--r--drivers/net/ethernet/Kconfig10
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c15
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c28
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c8
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c31
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c56
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c2
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c17
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c4
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c4
-rw-r--r--drivers/net/ethernet/fealnx.c1953
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c11
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c5
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c14
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c8
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c8
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_common.c2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c13
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c27
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c5
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c137
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c8
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c13
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c20
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c58
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c3
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h5
-rw-r--r--drivers/net/ethernet/mediatek/mtk_sgmii.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c14
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_police.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c32
-rw-r--r--drivers/net/ethernet/mscc/ocelot_stats.c11
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c7
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c25
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c5
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c6
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c12
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c19
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h1
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c31
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c3
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c3
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c3
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c8
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c41
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h5
-rw-r--r--drivers/net/ethernet/via/via-velocity.c3
-rw-r--r--drivers/net/ethernet/via/via-velocity.h2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c9
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c5
-rw-r--r--drivers/net/ieee802154/ca8210.c2
-rw-r--r--drivers/net/ipa/gsi_reg.c9
-rw-r--r--drivers/net/ipa/gsi_reg.h4
-rw-r--r--drivers/net/ipa/ipa_reg.c28
-rw-r--r--drivers/net/ipa/ipa_reg.h21
-rw-r--r--drivers/net/ipa/reg.h3
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.5.c56
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.9.c44
-rw-r--r--drivers/net/ipvlan/ipvlan_l3s.c1
-rw-r--r--drivers/net/mdio/acpi_mdio.c10
-rw-r--r--drivers/net/mdio/mdio-thunder.c1
-rw-r--r--drivers/net/mdio/of_mdio.c12
-rw-r--r--drivers/net/phy/mdio_devres.c11
-rw-r--r--drivers/net/phy/microchip.c32
-rw-r--r--drivers/net/phy/mscc/mscc_main.c24
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c2
-rw-r--r--drivers/net/phy/phy.c23
-rw-r--r--drivers/net/phy/phy_device.c10
-rw-r--r--drivers/net/phy/sfp.c5
-rw-r--r--drivers/net/phy/smsc.c19
-rw-r--r--drivers/net/usb/asix_devices.c32
-rw-r--r--drivers/net/usb/cdc_mbim.c5
-rw-r--r--drivers/net/usb/lan78xx.c45
-rw-r--r--drivers/net/usb/plusb.c6
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/smsc75xx.c7
-rw-r--r--drivers/net/usb/smsc95xx.c6
-rw-r--r--drivers/net/veth.c52
-rw-r--r--drivers/net/virtio_net.c171
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c11
-rw-r--r--drivers/net/wireguard/queueing.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c50
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c29
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c40
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c3
-rw-r--r--drivers/nfc/fdp/i2c.c4
-rw-r--r--drivers/nfc/pn533/usb.c1
-rw-r--r--drivers/nfc/st-nci/ndlc.c6
-rw-r--r--drivers/nvme/host/core.c31
-rw-r--r--drivers/nvme/host/ioctl.c14
-rw-r--r--drivers/nvme/host/multipath.c8
-rw-r--r--drivers/nvme/host/pci.c5
-rw-r--r--drivers/nvme/host/tcp.c33
-rw-r--r--drivers/nvme/target/core.c4
-rw-r--r--drivers/nvmem/core.c2
-rw-r--r--drivers/pci/bus.c21
-rw-r--r--drivers/platform/chrome/cros_ec_chardev.c2
-rw-r--r--drivers/platform/mellanox/Kconfig9
-rw-r--r--drivers/platform/x86/Kconfig3
-rw-r--r--drivers/platform/x86/amd/pmc.c30
-rw-r--r--drivers/platform/x86/dell/dell-wmi-ddv.c12
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470_board_data.c5
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c5
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.h1
-rw-r--r--drivers/platform/x86/intel/tpmi.c14
-rw-r--r--drivers/platform/x86/mlx-platform.c2
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c2
-rw-r--r--drivers/power/supply/bq24190_charger.c1
-rw-r--r--drivers/power/supply/cros_usbpd-charger.c2
-rw-r--r--drivers/power/supply/da9150-charger.c1
-rw-r--r--drivers/power/supply/rk817_charger.c4
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c3
-rw-r--r--drivers/scsi/hosts.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c12
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h12
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c3
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h5
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_app.c4
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c90
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c25
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_transport.c20
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c21
-rw-r--r--drivers/scsi/scsi.c3
-rw-r--r--drivers/scsi/scsi_devinfo.c4
-rw-r--r--drivers/scsi/scsi_scan.c3
-rw-r--r--drivers/scsi/sd.c7
-rw-r--r--drivers/scsi/sd_zbc.c8
-rw-r--r--drivers/scsi/storvsc_drv.c16
-rw-r--r--drivers/soc/qcom/llcc-qcom.c6
-rw-r--r--drivers/soc/qcom/rmtfs_mem.c10
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/r8188eu/Kconfig16
-rw-r--r--drivers/staging/r8188eu/Makefile48
-rw-r--r--drivers/staging/r8188eu/TODO16
-rw-r--r--drivers/staging/r8188eu/core/rtw_ap.c1181
-rw-r--r--drivers/staging/r8188eu/core/rtw_br_ext.c658
-rw-r--r--drivers/staging/r8188eu/core/rtw_cmd.c1529
-rw-r--r--drivers/staging/r8188eu/core/rtw_efuse.c74
-rw-r--r--drivers/staging/r8188eu/core/rtw_fw.c335
-rw-r--r--drivers/staging/r8188eu/core/rtw_ieee80211.c1150
-rw-r--r--drivers/staging/r8188eu/core/rtw_ioctl_set.c479
-rw-r--r--drivers/staging/r8188eu/core/rtw_iol.c160
-rw-r--r--drivers/staging/r8188eu/core/rtw_led.c255
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme.c2067
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme_ext.c7817
-rw-r--r--drivers/staging/r8188eu/core/rtw_p2p.c1918
-rw-r--r--drivers/staging/r8188eu/core/rtw_pwrctrl.c445
-rw-r--r--drivers/staging/r8188eu/core/rtw_recv.c2010
-rw-r--r--drivers/staging/r8188eu/core/rtw_rf.c29
-rw-r--r--drivers/staging/r8188eu/core/rtw_security.c1374
-rw-r--r--drivers/staging/r8188eu/core/rtw_sta_mgt.c490
-rw-r--r--drivers/staging/r8188eu/core/rtw_wlan_util.c1551
-rw-r--r--drivers/staging/r8188eu/core/rtw_xmit.c2179
-rw-r--r--drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c654
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c733
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c212
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c269
-rw-r--r--drivers/staging/r8188eu/hal/HalPhyRf_8188e.c900
-rw-r--r--drivers/staging/r8188eu/hal/HalPwrSeqCmd.c149
-rw-r--r--drivers/staging/r8188eu/hal/hal_com.c139
-rw-r--r--drivers/staging/r8188eu/hal/hal_intf.c50
-rw-r--r--drivers/staging/r8188eu/hal/odm.c821
-rw-r--r--drivers/staging/r8188eu/hal/odm_HWConfig.c349
-rw-r--r--drivers/staging/r8188eu/hal/odm_RTL8188E.c264
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_cmd.c694
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_dm.c146
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_hal_init.c922
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_phycfg.c705
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_rf6052.c405
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c161
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188eu_xmit.c627
-rw-r--r--drivers/staging/r8188eu/hal/usb_halinit.c1069
-rw-r--r--drivers/staging/r8188eu/hal/usb_ops_linux.c476
-rw-r--r--drivers/staging/r8188eu/include/Hal8188EPhyCfg.h97
-rw-r--r--drivers/staging/r8188eu/include/Hal8188EPhyReg.h1072
-rw-r--r--drivers/staging/r8188eu/include/Hal8188ERateAdaptive.h49
-rw-r--r--drivers/staging/r8188eu/include/HalHWImg8188E_BB.h27
-rw-r--r--drivers/staging/r8188eu/include/HalHWImg8188E_MAC.h12
-rw-r--r--drivers/staging/r8188eu/include/HalHWImg8188E_RF.h13
-rw-r--r--drivers/staging/r8188eu/include/HalPhyRf_8188e.h36
-rw-r--r--drivers/staging/r8188eu/include/HalPwrSeqCmd.h18
-rw-r--r--drivers/staging/r8188eu/include/HalVerDef.h42
-rw-r--r--drivers/staging/r8188eu/include/drv_types.h224
-rw-r--r--drivers/staging/r8188eu/include/hal_com.h146
-rw-r--r--drivers/staging/r8188eu/include/hal_intf.h44
-rw-r--r--drivers/staging/r8188eu/include/ieee80211.h817
-rw-r--r--drivers/staging/r8188eu/include/odm.h416
-rw-r--r--drivers/staging/r8188eu/include/odm_HWConfig.h69
-rw-r--r--drivers/staging/r8188eu/include/odm_RTL8188E.h35
-rw-r--r--drivers/staging/r8188eu/include/odm_RegDefine11N.h47
-rw-r--r--drivers/staging/r8188eu/include/osdep_intf.h30
-rw-r--r--drivers/staging/r8188eu/include/osdep_service.h153
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_cmd.h90
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_dm.h28
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_hal.h181
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_recv.h40
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_rf.h18
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_spec.h1142
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_xmit.h130
-rw-r--r--drivers/staging/r8188eu/include/rtw_ap.h34
-rw-r--r--drivers/staging/r8188eu/include/rtw_br_ext.h43
-rw-r--r--drivers/staging/r8188eu/include/rtw_cmd.h925
-rw-r--r--drivers/staging/r8188eu/include/rtw_eeprom.h15
-rw-r--r--drivers/staging/r8188eu/include/rtw_efuse.h11
-rw-r--r--drivers/staging/r8188eu/include/rtw_event.h97
-rw-r--r--drivers/staging/r8188eu/include/rtw_fw.h17
-rw-r--r--drivers/staging/r8188eu/include/rtw_ht.h28
-rw-r--r--drivers/staging/r8188eu/include/rtw_io.h33
-rw-r--r--drivers/staging/r8188eu/include/rtw_ioctl.h13
-rw-r--r--drivers/staging/r8188eu/include/rtw_ioctl_set.h25
-rw-r--r--drivers/staging/r8188eu/include/rtw_iol.h55
-rw-r--r--drivers/staging/r8188eu/include/rtw_led.h57
-rw-r--r--drivers/staging/r8188eu/include/rtw_mlme.h574
-rw-r--r--drivers/staging/r8188eu/include/rtw_mlme_ext.h753
-rw-r--r--drivers/staging/r8188eu/include/rtw_p2p.h118
-rw-r--r--drivers/staging/r8188eu/include/rtw_pwrctrl.h111
-rw-r--r--drivers/staging/r8188eu/include/rtw_recv.h347
-rw-r--r--drivers/staging/r8188eu/include/rtw_rf.h80
-rw-r--r--drivers/staging/r8188eu/include/rtw_security.h231
-rw-r--r--drivers/staging/r8188eu/include/rtw_xmit.h334
-rw-r--r--drivers/staging/r8188eu/include/sta_info.h313
-rw-r--r--drivers/staging/r8188eu/include/usb_ops.h57
-rw-r--r--drivers/staging/r8188eu/include/usb_osintf.h21
-rw-r--r--drivers/staging/r8188eu/include/wifi.h773
-rw-r--r--drivers/staging/r8188eu/include/wlan_bssdef.h272
-rw-r--r--drivers/staging/r8188eu/os_dep/ioctl_linux.c3775
-rw-r--r--drivers/staging/r8188eu/os_dep/os_intfs.c807
-rw-r--r--drivers/staging/r8188eu/os_dep/osdep_service.c227
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_intf.c445
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_ops_linux.c136
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c35
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_security.h8
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c32
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c33
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c12
-rw-r--r--drivers/tee/amdtee/core.c29
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c5
-rw-r--r--drivers/thermal/thermal_core.c106
-rw-r--r--drivers/thermal/thermal_core.h2
-rw-r--r--drivers/thermal/thermal_sysfs.c74
-rw-r--r--drivers/thunderbolt/debugfs.c12
-rw-r--r--drivers/thunderbolt/nhi.c49
-rw-r--r--drivers/thunderbolt/nhi_regs.h6
-rw-r--r--drivers/thunderbolt/quirks.c44
-rw-r--r--drivers/thunderbolt/retimer.c23
-rw-r--r--drivers/thunderbolt/sb_regs.h1
-rw-r--r--drivers/thunderbolt/switch.c4
-rw-r--r--drivers/thunderbolt/tb.h15
-rw-r--r--drivers/thunderbolt/usb4.c53
-rw-r--r--drivers/tty/hvc/hvc_xen.c19
-rw-r--r--drivers/tty/serdev/core.c2
-rw-r--r--drivers/tty/serial/8250/8250_em.c4
-rw-r--r--drivers/tty/serial/8250/8250_fsl.c4
-rw-r--r--drivers/tty/serial/8250/Kconfig4
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/tty/serial/fsl_lpuart.c23
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c11
-rw-r--r--drivers/tty/serial/sc16is7xx.c6
-rw-r--r--drivers/tty/vt/vt.c3
-rw-r--r--drivers/ufs/core/ufshcd.c5
-rw-r--r--drivers/ufs/host/ufs-qcom.c10
-rw-r--r--drivers/usb/cdns3/cdns3-pci-wrap.c5
-rw-r--r--drivers/usb/cdns3/cdnsp-ep0.c19
-rw-r--r--drivers/usb/cdns3/cdnsp-pci.c27
-rw-r--r--drivers/usb/chipidea/ci.h2
-rw-r--r--drivers/usb/chipidea/core.c11
-rw-r--r--drivers/usb/chipidea/otg.c5
-rw-r--r--drivers/usb/dwc2/drd.c3
-rw-r--r--drivers/usb/dwc2/gadget.c6
-rw-r--r--drivers/usb/dwc2/platform.c19
-rw-r--r--drivers/usb/dwc3/core.h2
-rw-r--r--drivers/usb/dwc3/gadget.c14
-rw-r--r--drivers/usb/gadget/composite.c7
-rw-r--r--drivers/usb/gadget/function/u_audio.c2
-rw-r--r--drivers/usb/misc/onboard_usb_hub.c1
-rw-r--r--drivers/usb/misc/onboard_usb_hub.h1
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c28
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c33
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c2
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h1
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c6
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c11
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c2
-rw-r--r--drivers/vfio/pci/mlx5/main.c14
-rw-r--r--drivers/vhost/vdpa.c3
-rw-r--r--drivers/video/fbdev/amba-clcd.c2
-rw-r--r--drivers/video/fbdev/au1200fb.c3
-rw-r--r--drivers/video/fbdev/bw2.c2
-rw-r--r--drivers/video/fbdev/cg3.c2
-rw-r--r--drivers/video/fbdev/chipsfb.c14
-rw-r--r--drivers/video/fbdev/clps711x-fb.c3
-rw-r--r--drivers/video/fbdev/core/fb_defio.c17
-rw-r--r--drivers/video/fbdev/geode/lxfb_core.c3
-rw-r--r--drivers/video/fbdev/intelfb/intelfbdrv.c3
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c2
-rw-r--r--drivers/video/fbdev/offb.c4
-rw-r--r--drivers/video/fbdev/omap/Makefile1
-rw-r--r--drivers/video/fbdev/omap/lcd_osk.c86
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c30
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c2
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c3
-rw-r--r--drivers/video/fbdev/sm501fb.c4
-rw-r--r--drivers/video/fbdev/stifb.c27
-rw-r--r--drivers/video/fbdev/tcx.c3
-rw-r--r--drivers/video/fbdev/tgafb.c3
-rw-r--r--drivers/video/fbdev/wm8505fb.c4
-rw-r--r--drivers/video/fbdev/xilinxfb.c6
-rw-r--r--drivers/video/logo/pnmtologo.c674
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c128
-rw-r--r--drivers/w1/masters/ds2482.c5
-rw-r--r--drivers/xen/xenfs/xensyms.c10
1057 files changed, 41281 insertions, 72452 deletions
diff --git a/drivers/accel/habanalabs/common/command_submission.c b/drivers/accel/habanalabs/common/command_submission.c
index 8270db0a72a2..af9d2e22c6e7 100644
--- a/drivers/accel/habanalabs/common/command_submission.c
+++ b/drivers/accel/habanalabs/common/command_submission.c
@@ -14,10 +14,10 @@
#define HL_CS_FLAGS_TYPE_MASK (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
HL_CS_FLAGS_COLLECTIVE_WAIT | HL_CS_FLAGS_RESERVE_SIGNALS_ONLY | \
HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY | HL_CS_FLAGS_ENGINE_CORE_COMMAND | \
- HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
+ HL_CS_FLAGS_ENGINES_COMMAND | HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
-#define MAX_TS_ITER_NUM 10
+#define MAX_TS_ITER_NUM 100
/**
* enum hl_cs_wait_status - cs wait status
@@ -657,7 +657,7 @@ static inline void cs_release_sob_reset_handler(struct hl_device *hdev,
/*
* we get refcount upon reservation of signals or signal/wait cs for the
* hw_sob object, and need to put it when the first staged cs
- * (which cotains the encaps signals) or cs signal/wait is completed.
+ * (which contains the encaps signals) or cs signal/wait is completed.
*/
if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
(hl_cs_cmpl->type == CS_TYPE_WAIT) ||
@@ -1082,9 +1082,8 @@ static void
wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
{
struct hl_user_pending_interrupt *pend, *temp;
- unsigned long flags;
- spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ spin_lock(&interrupt->wait_list_lock);
list_for_each_entry_safe(pend, temp, &interrupt->wait_list_head, wait_list_node) {
if (pend->ts_reg_info.buf) {
list_del(&pend->wait_list_node);
@@ -1095,7 +1094,7 @@ wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
complete_all(&pend->fence.completion);
}
}
- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+ spin_unlock(&interrupt->wait_list_lock);
}
void hl_release_pending_user_interrupts(struct hl_device *hdev)
@@ -1168,6 +1167,22 @@ static void cs_completion(struct work_struct *work)
hl_complete_job(hdev, job);
}
+u32 hl_get_active_cs_num(struct hl_device *hdev)
+{
+ u32 active_cs_num = 0;
+ struct hl_cs *cs;
+
+ spin_lock(&hdev->cs_mirror_lock);
+
+ list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node)
+ if (!cs->completed)
+ active_cs_num++;
+
+ spin_unlock(&hdev->cs_mirror_lock);
+
+ return active_cs_num;
+}
+
static int validate_queue_index(struct hl_device *hdev,
struct hl_cs_chunk *chunk,
enum hl_queue_type *queue_type,
@@ -1304,6 +1319,8 @@ static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
return CS_UNRESERVE_SIGNALS;
else if (cs_type_flags & HL_CS_FLAGS_ENGINE_CORE_COMMAND)
return CS_TYPE_ENGINE_CORE;
+ else if (cs_type_flags & HL_CS_FLAGS_ENGINES_COMMAND)
+ return CS_TYPE_ENGINES;
else if (cs_type_flags & HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
return CS_TYPE_FLUSH_PCI_HBW_WRITES;
else
@@ -2429,10 +2446,13 @@ out:
static int cs_ioctl_engine_cores(struct hl_fpriv *hpriv, u64 engine_cores,
u32 num_engine_cores, u32 core_command)
{
- int rc;
struct hl_device *hdev = hpriv->hdev;
void __user *engine_cores_arr;
u32 *cores;
+ int rc;
+
+ if (!hdev->asic_prop.supports_engine_modes)
+ return -EPERM;
if (!num_engine_cores || num_engine_cores > hdev->asic_prop.num_engine_cores) {
dev_err(hdev->dev, "Number of engine cores %d is invalid\n", num_engine_cores);
@@ -2461,6 +2481,48 @@ static int cs_ioctl_engine_cores(struct hl_fpriv *hpriv, u64 engine_cores,
return rc;
}
+static int cs_ioctl_engines(struct hl_fpriv *hpriv, u64 engines_arr_user_addr,
+ u32 num_engines, enum hl_engine_command command)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ u32 *engines, max_num_of_engines;
+ void __user *engines_arr;
+ int rc;
+
+ if (!hdev->asic_prop.supports_engine_modes)
+ return -EPERM;
+
+ if (command >= HL_ENGINE_COMMAND_MAX) {
+ dev_err(hdev->dev, "Engine command is invalid\n");
+ return -EINVAL;
+ }
+
+ max_num_of_engines = hdev->asic_prop.max_num_of_engines;
+ if (command == HL_ENGINE_CORE_RUN || command == HL_ENGINE_CORE_HALT)
+ max_num_of_engines = hdev->asic_prop.num_engine_cores;
+
+ if (!num_engines || num_engines > max_num_of_engines) {
+ dev_err(hdev->dev, "Number of engines %d is invalid\n", num_engines);
+ return -EINVAL;
+ }
+
+ engines_arr = (void __user *) (uintptr_t) engines_arr_user_addr;
+ engines = kmalloc_array(num_engines, sizeof(u32), GFP_KERNEL);
+ if (!engines)
+ return -ENOMEM;
+
+ if (copy_from_user(engines, engines_arr, num_engines * sizeof(u32))) {
+ dev_err(hdev->dev, "Failed to copy engine-ids array from user\n");
+ kfree(engines);
+ return -EFAULT;
+ }
+
+ rc = hdev->asic_funcs->set_engines(hdev, engines, num_engines, command);
+ kfree(engines);
+
+ return rc;
+}
+
static int cs_ioctl_flush_pci_hbw_writes(struct hl_fpriv *hpriv)
{
struct hl_device *hdev = hpriv->hdev;
@@ -2532,6 +2594,10 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
rc = cs_ioctl_engine_cores(hpriv, args->in.engine_cores,
args->in.num_engine_cores, args->in.core_command);
break;
+ case CS_TYPE_ENGINES:
+ rc = cs_ioctl_engines(hpriv, args->in.engines,
+ args->in.num_engines, args->in.engine_command);
+ break;
case CS_TYPE_FLUSH_PCI_HBW_WRITES:
rc = cs_ioctl_flush_pci_hbw_writes(hpriv);
break;
@@ -3143,8 +3209,9 @@ static int ts_buff_get_kernel_ts_record(struct hl_mmap_mem_buf *buf,
struct hl_user_pending_interrupt *cb_last =
(struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
(ts_buff->kernel_buff_size / sizeof(struct hl_user_pending_interrupt));
- unsigned long flags, iter_counter = 0;
+ unsigned long iter_counter = 0;
u64 current_cq_counter;
+ ktime_t timestamp;
/* Validate ts_offset not exceeding last max */
if (requested_offset_record >= cb_last) {
@@ -3153,8 +3220,10 @@ static int ts_buff_get_kernel_ts_record(struct hl_mmap_mem_buf *buf,
return -EINVAL;
}
+ timestamp = ktime_get();
+
start_over:
- spin_lock_irqsave(wait_list_lock, flags);
+ spin_lock(wait_list_lock);
/* Unregister only if we didn't reach the target value
* since in this case there will be no handling in irq context
@@ -3165,7 +3234,7 @@ start_over:
current_cq_counter = *requested_offset_record->cq_kernel_addr;
if (current_cq_counter < requested_offset_record->cq_target_value) {
list_del(&requested_offset_record->wait_list_node);
- spin_unlock_irqrestore(wait_list_lock, flags);
+ spin_unlock(wait_list_lock);
hl_mmap_mem_buf_put(requested_offset_record->ts_reg_info.buf);
hl_cb_put(requested_offset_record->ts_reg_info.cq_cb);
@@ -3176,13 +3245,14 @@ start_over:
dev_dbg(buf->mmg->dev,
"ts node in middle of irq handling\n");
- /* irq handling in the middle give it time to finish */
- spin_unlock_irqrestore(wait_list_lock, flags);
- usleep_range(1, 10);
+ /* irq thread handling is in progress - give it time to finish */
+ spin_unlock(wait_list_lock);
+ usleep_range(100, 1000);
if (++iter_counter == MAX_TS_ITER_NUM) {
dev_err(buf->mmg->dev,
- "handling registration interrupt took too long!!\n");
- return -EINVAL;
+ "Timestamp offset processing reached timeout of %lld ms\n",
+ ktime_ms_delta(ktime_get(), timestamp));
+ return -EAGAIN;
}
goto start_over;
@@ -3197,7 +3267,7 @@ start_over:
(u64 *) cq_cb->kernel_address + cq_offset;
requested_offset_record->cq_target_value = target_value;
- spin_unlock_irqrestore(wait_list_lock, flags);
+ spin_unlock(wait_list_lock);
}
*pend = requested_offset_record;
@@ -3217,7 +3287,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
struct hl_user_pending_interrupt *pend;
struct hl_mmap_mem_buf *buf;
struct hl_cb *cq_cb;
- unsigned long timeout, flags;
+ unsigned long timeout;
long completion_rc;
int rc = 0;
@@ -3264,7 +3334,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
pend->cq_target_value = target_value;
}
- spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ spin_lock(&interrupt->wait_list_lock);
/* We check for completion value as interrupt could have been received
* before we added the node to the wait list
@@ -3272,7 +3342,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
if (*pend->cq_kernel_addr >= target_value) {
if (register_ts_record)
pend->ts_reg_info.in_use = 0;
- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+ spin_unlock(&interrupt->wait_list_lock);
*status = HL_WAIT_CS_STATUS_COMPLETED;
@@ -3284,7 +3354,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
goto set_timestamp;
}
} else if (!timeout_us) {
- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+ spin_unlock(&interrupt->wait_list_lock);
*status = HL_WAIT_CS_STATUS_BUSY;
pend->fence.timestamp = ktime_get();
goto set_timestamp;
@@ -3309,7 +3379,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
pend->ts_reg_info.in_use = 1;
list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+ spin_unlock(&interrupt->wait_list_lock);
if (register_ts_record) {
rc = *status = HL_WAIT_CS_STATUS_COMPLETED;
@@ -3353,9 +3423,9 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
* for ts record, the node will be deleted in the irq handler after
* we reach the target value.
*/
- spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ spin_lock(&interrupt->wait_list_lock);
list_del(&pend->wait_list_node);
- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+ spin_unlock(&interrupt->wait_list_lock);
set_timestamp:
*timestamp = ktime_to_ns(pend->fence.timestamp);
@@ -3383,7 +3453,7 @@ static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_
u64 *timestamp)
{
struct hl_user_pending_interrupt *pend;
- unsigned long timeout, flags;
+ unsigned long timeout;
u64 completion_value;
long completion_rc;
int rc = 0;
@@ -3403,9 +3473,9 @@ static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_
/* Add pending user interrupt to relevant list for the interrupt
* handler to monitor
*/
- spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ spin_lock(&interrupt->wait_list_lock);
list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+ spin_unlock(&interrupt->wait_list_lock);
/* We check for completion value as interrupt could have been received
* before we added the node to the wait list
@@ -3436,14 +3506,14 @@ wait_again:
* If comparison fails, keep waiting until timeout expires
*/
if (completion_rc > 0) {
- spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ spin_lock(&interrupt->wait_list_lock);
/* reinit_completion must be called before we check for user
* completion value, otherwise, if interrupt is received after
* the comparison and before the next wait_for_completion,
* we will reach timeout and fail
*/
reinit_completion(&pend->fence.completion);
- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+ spin_unlock(&interrupt->wait_list_lock);
if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
dev_err(hdev->dev, "Failed to copy completion value from user\n");
@@ -3480,9 +3550,9 @@ wait_again:
}
remove_pending_user_interrupt:
- spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ spin_lock(&interrupt->wait_list_lock);
list_del(&pend->wait_list_node);
- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+ spin_unlock(&interrupt->wait_list_lock);
*timestamp = ktime_to_ns(pend->fence.timestamp);
diff --git a/drivers/accel/habanalabs/common/debugfs.c b/drivers/accel/habanalabs/common/debugfs.c
index 945c0e6758ca..22dd17c077c0 100644
--- a/drivers/accel/habanalabs/common/debugfs.c
+++ b/drivers/accel/habanalabs/common/debugfs.c
@@ -258,7 +258,7 @@ static int vm_show(struct seq_file *s, void *data)
if (!dev_entry->hdev->mmu_enable)
return 0;
- spin_lock(&dev_entry->ctx_mem_hash_spinlock);
+ mutex_lock(&dev_entry->ctx_mem_hash_mutex);
list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) {
once = false;
@@ -329,7 +329,7 @@ static int vm_show(struct seq_file *s, void *data)
}
- spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
+ mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
ctx = hl_get_compute_ctx(dev_entry->hdev);
if (ctx) {
@@ -1583,209 +1583,216 @@ static const struct file_operations hl_debugfs_fops = {
.release = single_release,
};
-static void add_secured_nodes(struct hl_dbg_device_entry *dev_entry)
+static void add_secured_nodes(struct hl_dbg_device_entry *dev_entry, struct dentry *root)
{
debugfs_create_u8("i2c_bus",
0644,
- dev_entry->root,
+ root,
&dev_entry->i2c_bus);
debugfs_create_u8("i2c_addr",
0644,
- dev_entry->root,
+ root,
&dev_entry->i2c_addr);
debugfs_create_u8("i2c_reg",
0644,
- dev_entry->root,
+ root,
&dev_entry->i2c_reg);
debugfs_create_u8("i2c_len",
0644,
- dev_entry->root,
+ root,
&dev_entry->i2c_len);
debugfs_create_file("i2c_data",
0644,
- dev_entry->root,
+ root,
dev_entry,
&hl_i2c_data_fops);
debugfs_create_file("led0",
0200,
- dev_entry->root,
+ root,
dev_entry,
&hl_led0_fops);
debugfs_create_file("led1",
0200,
- dev_entry->root,
+ root,
dev_entry,
&hl_led1_fops);
debugfs_create_file("led2",
0200,
- dev_entry->root,
+ root,
dev_entry,
&hl_led2_fops);
}
-void hl_debugfs_add_device(struct hl_device *hdev)
+static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_entry *dev_entry,
+ struct dentry *root)
{
- struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
int count = ARRAY_SIZE(hl_debugfs_list);
struct hl_debugfs_entry *entry;
int i;
- dev_entry->hdev = hdev;
- dev_entry->entry_arr = kmalloc_array(count,
- sizeof(struct hl_debugfs_entry),
- GFP_KERNEL);
- if (!dev_entry->entry_arr)
- return;
-
- dev_entry->data_dma_blob_desc.size = 0;
- dev_entry->data_dma_blob_desc.data = NULL;
- dev_entry->mon_dump_blob_desc.size = 0;
- dev_entry->mon_dump_blob_desc.data = NULL;
-
- INIT_LIST_HEAD(&dev_entry->file_list);
- INIT_LIST_HEAD(&dev_entry->cb_list);
- INIT_LIST_HEAD(&dev_entry->cs_list);
- INIT_LIST_HEAD(&dev_entry->cs_job_list);
- INIT_LIST_HEAD(&dev_entry->userptr_list);
- INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list);
- mutex_init(&dev_entry->file_mutex);
- init_rwsem(&dev_entry->state_dump_sem);
- spin_lock_init(&dev_entry->cb_spinlock);
- spin_lock_init(&dev_entry->cs_spinlock);
- spin_lock_init(&dev_entry->cs_job_spinlock);
- spin_lock_init(&dev_entry->userptr_spinlock);
- spin_lock_init(&dev_entry->ctx_mem_hash_spinlock);
-
- dev_entry->root = debugfs_create_dir(dev_name(hdev->dev),
- hl_debug_root);
-
debugfs_create_x64("memory_scrub_val",
0644,
- dev_entry->root,
+ root,
&hdev->memory_scrub_val);
debugfs_create_file("memory_scrub",
0200,
- dev_entry->root,
+ root,
dev_entry,
&hl_mem_scrub_fops);
debugfs_create_x64("addr",
0644,
- dev_entry->root,
+ root,
&dev_entry->addr);
debugfs_create_file("data32",
0644,
- dev_entry->root,
+ root,
dev_entry,
&hl_data32b_fops);
debugfs_create_file("data64",
0644,
- dev_entry->root,
+ root,
dev_entry,
&hl_data64b_fops);
debugfs_create_file("set_power_state",
0200,
- dev_entry->root,
+ root,
dev_entry,
&hl_power_fops);
debugfs_create_file("device",
0200,
- dev_entry->root,
+ root,
dev_entry,
&hl_device_fops);
debugfs_create_file("clk_gate",
0200,
- dev_entry->root,
+ root,
dev_entry,
&hl_clk_gate_fops);
debugfs_create_file("stop_on_err",
0644,
- dev_entry->root,
+ root,
dev_entry,
&hl_stop_on_err_fops);
debugfs_create_file("dump_security_violations",
0644,
- dev_entry->root,
+ root,
dev_entry,
&hl_security_violations_fops);
debugfs_create_file("dump_razwi_events",
0644,
- dev_entry->root,
+ root,
dev_entry,
&hl_razwi_check_fops);
debugfs_create_file("dma_size",
0200,
- dev_entry->root,
+ root,
dev_entry,
&hl_dma_size_fops);
debugfs_create_blob("data_dma",
0400,
- dev_entry->root,
+ root,
&dev_entry->data_dma_blob_desc);
debugfs_create_file("monitor_dump_trig",
0200,
- dev_entry->root,
+ root,
dev_entry,
&hl_monitor_dump_fops);
debugfs_create_blob("monitor_dump",
0400,
- dev_entry->root,
+ root,
&dev_entry->mon_dump_blob_desc);
debugfs_create_x8("skip_reset_on_timeout",
0644,
- dev_entry->root,
+ root,
&hdev->reset_info.skip_reset_on_timeout);
debugfs_create_file("state_dump",
0600,
- dev_entry->root,
+ root,
dev_entry,
&hl_state_dump_fops);
debugfs_create_file("timeout_locked",
0644,
- dev_entry->root,
+ root,
dev_entry,
&hl_timeout_locked_fops);
debugfs_create_u32("device_release_watchdog_timeout",
0644,
- dev_entry->root,
+ root,
&hdev->device_release_watchdog_timeout_sec);
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
debugfs_create_file(hl_debugfs_list[i].name,
0444,
- dev_entry->root,
+ root,
entry,
&hl_debugfs_fops);
entry->info_ent = &hl_debugfs_list[i];
entry->dev_entry = dev_entry;
}
+}
+
+void hl_debugfs_add_device(struct hl_device *hdev)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+ int count = ARRAY_SIZE(hl_debugfs_list);
+
+ dev_entry->hdev = hdev;
+ dev_entry->entry_arr = kmalloc_array(count,
+ sizeof(struct hl_debugfs_entry),
+ GFP_KERNEL);
+ if (!dev_entry->entry_arr)
+ return;
+
+ dev_entry->data_dma_blob_desc.size = 0;
+ dev_entry->data_dma_blob_desc.data = NULL;
+ dev_entry->mon_dump_blob_desc.size = 0;
+ dev_entry->mon_dump_blob_desc.data = NULL;
+
+ INIT_LIST_HEAD(&dev_entry->file_list);
+ INIT_LIST_HEAD(&dev_entry->cb_list);
+ INIT_LIST_HEAD(&dev_entry->cs_list);
+ INIT_LIST_HEAD(&dev_entry->cs_job_list);
+ INIT_LIST_HEAD(&dev_entry->userptr_list);
+ INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list);
+ mutex_init(&dev_entry->file_mutex);
+ init_rwsem(&dev_entry->state_dump_sem);
+ spin_lock_init(&dev_entry->cb_spinlock);
+ spin_lock_init(&dev_entry->cs_spinlock);
+ spin_lock_init(&dev_entry->cs_job_spinlock);
+ spin_lock_init(&dev_entry->userptr_spinlock);
+ mutex_init(&dev_entry->ctx_mem_hash_mutex);
+
+ dev_entry->root = debugfs_create_dir(dev_name(hdev->dev),
+ hl_debug_root);
+ add_files_to_device(hdev, dev_entry, dev_entry->root);
if (!hdev->asic_prop.fw_security_enabled)
- add_secured_nodes(dev_entry);
+ add_secured_nodes(dev_entry, dev_entry->root);
}
void hl_debugfs_remove_device(struct hl_device *hdev)
@@ -1795,6 +1802,7 @@ void hl_debugfs_remove_device(struct hl_device *hdev)
debugfs_remove_recursive(entry->root);
+ mutex_destroy(&entry->ctx_mem_hash_mutex);
mutex_destroy(&entry->file_mutex);
vfree(entry->data_dma_blob_desc.data);
@@ -1901,18 +1909,18 @@ void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
{
struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
- spin_lock(&dev_entry->ctx_mem_hash_spinlock);
+ mutex_lock(&dev_entry->ctx_mem_hash_mutex);
list_add(&ctx->debugfs_list, &dev_entry->ctx_mem_hash_list);
- spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
+ mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
}
void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
{
struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
- spin_lock(&dev_entry->ctx_mem_hash_spinlock);
+ mutex_lock(&dev_entry->ctx_mem_hash_mutex);
list_del(&ctx->debugfs_list);
- spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
+ mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
}
/**
diff --git a/drivers/accel/habanalabs/common/decoder.c b/drivers/accel/habanalabs/common/decoder.c
index 2aab14d74b53..69c78c1784b4 100644
--- a/drivers/accel/habanalabs/common/decoder.c
+++ b/drivers/accel/habanalabs/common/decoder.c
@@ -46,7 +46,7 @@ static void dec_print_abnrm_intr_source(struct hl_device *hdev, u32 irq_status)
static void dec_error_intr_work(struct hl_device *hdev, u32 base_addr, u32 core_id)
{
bool reset_required = false;
- u32 irq_status;
+ u32 irq_status, event_mask;
irq_status = RREG32(base_addr + VCMD_IRQ_STATUS_OFFSET);
@@ -54,17 +54,27 @@ static void dec_error_intr_work(struct hl_device *hdev, u32 base_addr, u32 core_
dec_print_abnrm_intr_source(hdev, irq_status);
- if (irq_status & VCMD_IRQ_STATUS_TIMEOUT_MASK)
- reset_required = true;
-
/* Clear the interrupt */
WREG32(base_addr + VCMD_IRQ_STATUS_OFFSET, irq_status);
/* Flush the interrupt clear */
RREG32(base_addr + VCMD_IRQ_STATUS_OFFSET);
- if (reset_required)
- hl_device_reset(hdev, HL_DRV_RESET_HARD);
+ if (irq_status & VCMD_IRQ_STATUS_TIMEOUT_MASK) {
+ reset_required = true;
+ event_mask = HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
+ } else if (irq_status & VCMD_IRQ_STATUS_CMDERR_MASK) {
+ event_mask = HL_NOTIFIER_EVENT_UNDEFINED_OPCODE;
+ } else {
+ event_mask = HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
+ }
+
+ if (reset_required) {
+ event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
+ hl_device_cond_reset(hdev, 0, event_mask);
+ } else {
+ hl_notifier_event_send_all(hdev, event_mask);
+ }
}
static void dec_completion_abnrm(struct work_struct *work)
diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
index 9933e5858a36..713005998cbc 100644
--- a/drivers/accel/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c
@@ -22,7 +22,6 @@
enum dma_alloc_type {
DMA_ALLOC_COHERENT,
- DMA_ALLOC_CPU_ACCESSIBLE,
DMA_ALLOC_POOL,
};
@@ -121,9 +120,6 @@ static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t
case DMA_ALLOC_COHERENT:
ptr = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, size, dma_handle, flag);
break;
- case DMA_ALLOC_CPU_ACCESSIBLE:
- ptr = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
- break;
case DMA_ALLOC_POOL:
ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, size, flag, dma_handle);
break;
@@ -147,9 +143,6 @@ static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *c
case DMA_ALLOC_COHERENT:
hdev->asic_funcs->asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle);
break;
- case DMA_ALLOC_CPU_ACCESSIBLE:
- hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, size, cpu_addr);
- break;
case DMA_ALLOC_POOL:
hdev->asic_funcs->asic_dma_pool_free(hdev, cpu_addr, dma_handle);
break;
@@ -170,18 +163,6 @@ void hl_asic_dma_free_coherent_caller(struct hl_device *hdev, size_t size, void
hl_asic_dma_free_common(hdev, size, cpu_addr, dma_handle, DMA_ALLOC_COHERENT, caller);
}
-void *hl_cpu_accessible_dma_pool_alloc_caller(struct hl_device *hdev, size_t size,
- dma_addr_t *dma_handle, const char *caller)
-{
- return hl_dma_alloc_common(hdev, size, dma_handle, 0, DMA_ALLOC_CPU_ACCESSIBLE, caller);
-}
-
-void hl_cpu_accessible_dma_pool_free_caller(struct hl_device *hdev, size_t size, void *vaddr,
- const char *caller)
-{
- hl_asic_dma_free_common(hdev, size, vaddr, 0, DMA_ALLOC_CPU_ACCESSIBLE, caller);
-}
-
void *hl_asic_dma_pool_zalloc_caller(struct hl_device *hdev, size_t size, gfp_t mem_flags,
dma_addr_t *dma_handle, const char *caller)
{
@@ -194,6 +175,16 @@ void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_
hl_asic_dma_free_common(hdev, 0, vaddr, dma_addr, DMA_ALLOC_POOL, caller);
}
+void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle)
+{
+ return hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
+}
+
+void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr)
+{
+ hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, size, vaddr);
+}
+
int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -389,18 +380,17 @@ bool hl_ctrl_device_operational(struct hl_device *hdev,
static void print_idle_status_mask(struct hl_device *hdev, const char *message,
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE])
{
- u32 pad_width[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {};
-
- BUILD_BUG_ON(HL_BUSY_ENGINES_MASK_EXT_SIZE != 4);
-
- pad_width[3] = idle_mask[3] ? 16 : 0;
- pad_width[2] = idle_mask[2] || pad_width[3] ? 16 : 0;
- pad_width[1] = idle_mask[1] || pad_width[2] ? 16 : 0;
- pad_width[0] = idle_mask[0] || pad_width[1] ? 16 : 0;
-
- dev_err(hdev->dev, "%s (mask %0*llx_%0*llx_%0*llx_%0*llx)\n",
- message, pad_width[3], idle_mask[3], pad_width[2], idle_mask[2],
- pad_width[1], idle_mask[1], pad_width[0], idle_mask[0]);
+ if (idle_mask[3])
+ dev_err(hdev->dev, "%s (mask %#llx_%016llx_%016llx_%016llx)\n",
+ message, idle_mask[3], idle_mask[2], idle_mask[1], idle_mask[0]);
+ else if (idle_mask[2])
+ dev_err(hdev->dev, "%s (mask %#llx_%016llx_%016llx)\n",
+ message, idle_mask[2], idle_mask[1], idle_mask[0]);
+ else if (idle_mask[1])
+ dev_err(hdev->dev, "%s (mask %#llx_%016llx)\n",
+ message, idle_mask[1], idle_mask[0]);
+ else
+ dev_err(hdev->dev, "%s (mask %#llx)\n", message, idle_mask[0]);
}
static void hpriv_release(struct kref *ref)
@@ -423,6 +413,9 @@ static void hpriv_release(struct kref *ref)
mutex_destroy(&hpriv->ctx_lock);
mutex_destroy(&hpriv->restore_phase_mutex);
+ /* There should be no memory buffers at this point, so the handles IDR can be destroyed */
+ hl_mem_mgr_idr_destroy(&hpriv->mem_mgr);
+
/* Device should be reset if reset-upon-device-release is enabled, or if there is a pending
* reset that waits for device release.
*/
@@ -492,6 +485,36 @@ int hl_hpriv_put(struct hl_fpriv *hpriv)
return kref_put(&hpriv->refcount, hpriv_release);
}
+static void print_device_in_use_info(struct hl_device *hdev, const char *message)
+{
+ u32 active_cs_num, dmabuf_export_cnt;
+ bool unknown_reason = true;
+ char buf[128];
+ size_t size;
+ int offset;
+
+ size = sizeof(buf);
+ offset = 0;
+
+ active_cs_num = hl_get_active_cs_num(hdev);
+ if (active_cs_num) {
+ unknown_reason = false;
+ offset += scnprintf(buf + offset, size - offset, " [%u active CS]", active_cs_num);
+ }
+
+ dmabuf_export_cnt = atomic_read(&hdev->dmabuf_export_cnt);
+ if (dmabuf_export_cnt) {
+ unknown_reason = false;
+ offset += scnprintf(buf + offset, size - offset, " [%u exported dma-buf]",
+ dmabuf_export_cnt);
+ }
+
+ if (unknown_reason)
+ scnprintf(buf + offset, size - offset, " [unknown reason]");
+
+ dev_notice(hdev->dev, "%s%s\n", message, buf);
+}
+
/*
* hl_device_release - release function for habanalabs device
*
@@ -514,17 +537,20 @@ static int hl_device_release(struct inode *inode, struct file *filp)
}
hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);
+
+ /* Memory buffers might still be in use at this point and thus the handles IDR destruction
+ * is postponed to hpriv_release().
+ */
hl_mem_mgr_fini(&hpriv->mem_mgr);
hdev->compute_ctx_in_release = 1;
if (!hl_hpriv_put(hpriv)) {
- dev_notice(hdev->dev, "User process closed FD but device still in use\n");
+ print_device_in_use_info(hdev, "User process closed FD but device still in use");
hl_device_reset(hdev, HL_DRV_RESET_HARD);
}
- hdev->last_open_session_duration_jif =
- jiffies - hdev->last_successful_open_jif;
+ hdev->last_open_session_duration_jif = jiffies - hdev->last_successful_open_jif;
return 0;
}
@@ -617,7 +643,7 @@ static void device_release_func(struct device *dev)
* device_init_cdev - Initialize cdev and device for habanalabs device
*
* @hdev: pointer to habanalabs device structure
- * @hclass: pointer to the class object of the device
+ * @class: pointer to the class object of the device
* @minor: minor number of the specific device
* @fpos: file operations to install for this device
* @name: name of the device as it will appear in the filesystem
@@ -626,7 +652,7 @@ static void device_release_func(struct device *dev)
*
* Initialize a cdev and a Linux device for habanalabs's device.
*/
-static int device_init_cdev(struct hl_device *hdev, struct class *hclass,
+static int device_init_cdev(struct hl_device *hdev, struct class *class,
int minor, const struct file_operations *fops,
char *name, struct cdev *cdev,
struct device **dev)
@@ -640,7 +666,7 @@ static int device_init_cdev(struct hl_device *hdev, struct class *hclass,
device_initialize(*dev);
(*dev)->devt = MKDEV(hdev->major, minor);
- (*dev)->class = hclass;
+ (*dev)->class = class;
(*dev)->release = device_release_func;
dev_set_drvdata(*dev, hdev);
dev_set_name(*dev, "%s", name);
@@ -733,14 +759,14 @@ static void device_hard_reset_pending(struct work_struct *work)
static void device_release_watchdog_func(struct work_struct *work)
{
- struct hl_device_reset_work *device_release_watchdog_work =
- container_of(work, struct hl_device_reset_work, reset_work.work);
- struct hl_device *hdev = device_release_watchdog_work->hdev;
+ struct hl_device_reset_work *watchdog_work =
+ container_of(work, struct hl_device_reset_work, reset_work.work);
+ struct hl_device *hdev = watchdog_work->hdev;
u32 flags;
- dev_dbg(hdev->dev, "Device wasn't released in time. Initiate device reset.\n");
+ dev_dbg(hdev->dev, "Device wasn't released in time. Initiate hard-reset.\n");
- flags = device_release_watchdog_work->flags | HL_DRV_RESET_FROM_WD_THR;
+ flags = watchdog_work->flags | HL_DRV_RESET_HARD | HL_DRV_RESET_FROM_WD_THR;
hl_device_reset(hdev, flags);
}
@@ -805,7 +831,7 @@ static int device_early_init(struct hl_device *hdev)
}
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
- snprintf(workq_name, 32, "hl-free-jobs-%u", (u32) i);
+ snprintf(workq_name, 32, "hl%u-free-jobs-%u", hdev->cdev_idx, (u32) i);
hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
if (hdev->cq_wq[i] == NULL) {
dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
@@ -814,14 +840,16 @@ static int device_early_init(struct hl_device *hdev)
}
}
- hdev->eq_wq = create_singlethread_workqueue("hl-events");
+ snprintf(workq_name, 32, "hl%u-events", hdev->cdev_idx);
+ hdev->eq_wq = create_singlethread_workqueue(workq_name);
if (hdev->eq_wq == NULL) {
dev_err(hdev->dev, "Failed to allocate EQ workqueue\n");
rc = -ENOMEM;
goto free_cq_wq;
}
- hdev->cs_cmplt_wq = alloc_workqueue("hl-cs-completions", WQ_UNBOUND, 0);
+ snprintf(workq_name, 32, "hl%u-cs-completions", hdev->cdev_idx);
+ hdev->cs_cmplt_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
if (!hdev->cs_cmplt_wq) {
dev_err(hdev->dev,
"Failed to allocate CS completions workqueue\n");
@@ -829,7 +857,8 @@ static int device_early_init(struct hl_device *hdev)
goto free_eq_wq;
}
- hdev->ts_free_obj_wq = alloc_workqueue("hl-ts-free-obj", WQ_UNBOUND, 0);
+ snprintf(workq_name, 32, "hl%u-ts-free-obj", hdev->cdev_idx);
+ hdev->ts_free_obj_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
if (!hdev->ts_free_obj_wq) {
dev_err(hdev->dev,
"Failed to allocate Timestamp registration free workqueue\n");
@@ -837,15 +866,15 @@ static int device_early_init(struct hl_device *hdev)
goto free_cs_cmplt_wq;
}
- hdev->prefetch_wq = alloc_workqueue("hl-prefetch", WQ_UNBOUND, 0);
+ snprintf(workq_name, 32, "hl%u-prefetch", hdev->cdev_idx);
+ hdev->prefetch_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
if (!hdev->prefetch_wq) {
dev_err(hdev->dev, "Failed to allocate MMU prefetch workqueue\n");
rc = -ENOMEM;
goto free_ts_free_wq;
}
- hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info),
- GFP_KERNEL);
+ hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info), GFP_KERNEL);
if (!hdev->hl_chip_info) {
rc = -ENOMEM;
goto free_prefetch_wq;
@@ -857,7 +886,8 @@ static int device_early_init(struct hl_device *hdev)
hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr);
- hdev->reset_wq = create_singlethread_workqueue("hl_device_reset");
+ snprintf(workq_name, 32, "hl%u_device_reset", hdev->cdev_idx);
+ hdev->reset_wq = create_singlethread_workqueue(workq_name);
if (!hdev->reset_wq) {
rc = -ENOMEM;
dev_err(hdev->dev, "Failed to create device reset WQ\n");
@@ -887,6 +917,7 @@ static int device_early_init(struct hl_device *hdev)
free_cb_mgr:
hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
+ hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);
free_chip_info:
kfree(hdev->hl_chip_info);
free_prefetch_wq:
@@ -930,6 +961,7 @@ static void device_early_fini(struct hl_device *hdev)
mutex_destroy(&hdev->clk_throttling.lock);
hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
+ hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);
kfree(hdev->hl_chip_info);
@@ -953,6 +985,8 @@ static void hl_device_heartbeat(struct work_struct *work)
{
struct hl_device *hdev = container_of(work, struct hl_device,
work_heartbeat.work);
+ struct hl_info_fw_err_info info = {0};
+ u64 event_mask = HL_NOTIFIER_EVENT_DEVICE_RESET | HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;
if (!hl_device_operational(hdev, NULL))
goto reschedule;
@@ -963,7 +997,10 @@ static void hl_device_heartbeat(struct work_struct *work)
if (hl_device_operational(hdev, NULL))
dev_err(hdev->dev, "Device heartbeat failed!\n");
- hl_device_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT);
+ info.err_type = HL_INFO_FW_HEARTBEAT_ERR;
+ info.event_mask = &event_mask;
+ hl_handle_fw_err(hdev, &info);
+ hl_device_cond_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT, event_mask);
return;
@@ -1402,7 +1439,7 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
*/
if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0))
dev_warn(hdev->dev,
- "Failed to disable PCI access by F/W\n");
+ "Failed to disable FW's PCI access\n");
}
}
@@ -1424,12 +1461,11 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
*/
int hl_device_reset(struct hl_device *hdev, u32 flags)
{
- bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false,
- reset_upon_device_release = false, schedule_hard_reset = false,
- delay_reset, from_dev_release, from_watchdog_thread;
+ bool hard_reset, from_hard_reset_thread, fw_reset, reset_upon_device_release,
+ schedule_hard_reset = false, delay_reset, from_dev_release, from_watchdog_thread;
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
struct hl_ctx *ctx;
- int i, rc;
+ int i, rc, hw_fini_rc;
if (!hdev->init_done) {
dev_err(hdev->dev, "Can't reset before initialization is done\n");
@@ -1442,6 +1478,7 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
from_dev_release = !!(flags & HL_DRV_RESET_DEV_RELEASE);
delay_reset = !!(flags & HL_DRV_RESET_DELAY);
from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR);
+ reset_upon_device_release = hdev->reset_upon_device_release && from_dev_release;
if (!hard_reset && (hl_device_status(hdev) == HL_DEVICE_STATUS_MALFUNCTION)) {
dev_dbg(hdev->dev, "soft-reset isn't supported on a malfunctioning device\n");
@@ -1449,30 +1486,26 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
}
if (!hard_reset && !hdev->asic_prop.supports_compute_reset) {
- hard_instead_soft = true;
+ dev_dbg(hdev->dev, "asic doesn't support compute reset - do hard-reset instead\n");
hard_reset = true;
}
- if (hdev->reset_upon_device_release && from_dev_release) {
+ if (reset_upon_device_release) {
if (hard_reset) {
dev_crit(hdev->dev,
"Aborting reset because hard-reset is mutually exclusive with reset-on-device-release\n");
return -EINVAL;
}
- reset_upon_device_release = true;
-
goto do_reset;
}
if (!hard_reset && !hdev->asic_prop.allow_inference_soft_reset) {
- hard_instead_soft = true;
+ dev_dbg(hdev->dev,
+ "asic doesn't allow inference soft reset - do hard-reset instead\n");
hard_reset = true;
}
- if (hard_instead_soft)
- dev_dbg(hdev->dev, "Doing hard-reset instead of compute reset\n");
-
do_reset:
/* Re-entry of reset thread */
if (from_hard_reset_thread && hdev->process_kill_trial_cnt)
@@ -1480,14 +1513,14 @@ do_reset:
/*
* Prevent concurrency in this function - only one reset should be
- * done at any given time. Only need to perform this if we didn't
- * get from the dedicated hard reset thread
+ * done at any given time. We need to perform this only if we didn't
+ * get here from a dedicated hard reset thread.
*/
if (!from_hard_reset_thread) {
/* Block future CS/VM/JOB completion operations */
spin_lock(&hdev->reset_info.lock);
if (hdev->reset_info.in_reset) {
- /* We only allow scheduling of a hard reset during compute reset */
+ /* We allow scheduling of a hard reset only during a compute reset */
if (hard_reset && hdev->reset_info.in_compute_reset)
hdev->reset_info.hard_reset_schedule_flags = flags;
spin_unlock(&hdev->reset_info.lock);
@@ -1505,15 +1538,17 @@ do_reset:
/* Cancel the device release watchdog work if required.
* In case of reset-upon-device-release while the release watchdog work is
- * scheduled, do hard-reset instead of compute-reset.
+ * scheduled due to a hard-reset, do hard-reset instead of compute-reset.
*/
if ((hard_reset || from_dev_release) && hdev->reset_info.watchdog_active) {
+ struct hl_device_reset_work *watchdog_work =
+ &hdev->device_release_watchdog_work;
+
hdev->reset_info.watchdog_active = 0;
if (!from_watchdog_thread)
- cancel_delayed_work_sync(
- &hdev->device_release_watchdog_work.reset_work);
+ cancel_delayed_work_sync(&watchdog_work->reset_work);
- if (from_dev_release) {
+ if (from_dev_release && (watchdog_work->flags & HL_DRV_RESET_HARD)) {
hdev->reset_info.in_compute_reset = 0;
flags |= HL_DRV_RESET_HARD;
flags &= ~HL_DRV_RESET_DEV_RELEASE;
@@ -1524,6 +1559,7 @@ do_reset:
if (delay_reset)
usleep_range(HL_RESET_DELAY_USEC, HL_RESET_DELAY_USEC << 1);
+escalate_reset_flow:
handle_reset_trigger(hdev, flags);
/* This also blocks future CS/VM/JOB completion operations */
@@ -1539,7 +1575,6 @@ do_reset:
dev_dbg(hdev->dev, "Going to reset engines of inference device\n");
}
-again:
if ((hard_reset) && (!from_hard_reset_thread)) {
hdev->reset_info.hard_reset_pending = true;
@@ -1592,7 +1627,7 @@ kill_processes:
}
/* Reset the H/W. It will be in idle state after this returns */
- hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);
+ hw_fini_rc = hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);
if (hard_reset) {
hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
@@ -1619,6 +1654,10 @@ kill_processes:
hl_ctx_put(ctx);
}
+ if (hw_fini_rc) {
+ rc = hw_fini_rc;
+ goto out_err;
+ }
/* Finished tear-down, starting to re-initialize */
if (hard_reset) {
@@ -1787,7 +1826,7 @@ kill_processes:
hdev->disabled = true;
hard_reset = true;
handle_reset_trigger(hdev, flags);
- goto again;
+ goto escalate_reset_flow;
}
}
@@ -1804,20 +1843,19 @@ out_err:
"%s Failed to reset! Device is NOT usable\n",
dev_name(&(hdev)->pdev->dev));
hdev->reset_info.hard_reset_cnt++;
- } else if (reset_upon_device_release) {
- spin_unlock(&hdev->reset_info.lock);
- dev_err(hdev->dev, "Failed to reset device after user release\n");
- flags |= HL_DRV_RESET_HARD;
- flags &= ~HL_DRV_RESET_DEV_RELEASE;
- hard_reset = true;
- goto again;
} else {
+ if (reset_upon_device_release) {
+ dev_err(hdev->dev, "Failed to reset device after user release\n");
+ flags &= ~HL_DRV_RESET_DEV_RELEASE;
+ } else {
+ dev_err(hdev->dev, "Failed to do compute reset\n");
+ hdev->reset_info.compute_reset_cnt++;
+ }
+
spin_unlock(&hdev->reset_info.lock);
- dev_err(hdev->dev, "Failed to do compute reset\n");
- hdev->reset_info.compute_reset_cnt++;
flags |= HL_DRV_RESET_HARD;
hard_reset = true;
- goto again;
+ goto escalate_reset_flow;
}
hdev->reset_info.in_reset = 0;
@@ -1840,10 +1878,6 @@ int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask)
{
struct hl_ctx *ctx = NULL;
- /* Device release watchdog is only for hard reset */
- if (!(flags & HL_DRV_RESET_HARD) && hdev->asic_prop.allow_inference_soft_reset)
- goto device_reset;
-
/* F/W reset cannot be postponed */
if (flags & HL_DRV_RESET_BYPASS_REQ_TO_FW)
goto device_reset;
@@ -1871,7 +1905,7 @@ int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask)
goto out;
hdev->device_release_watchdog_work.flags = flags;
- dev_dbg(hdev->dev, "Device is going to be reset in %u sec unless being released\n",
+ dev_dbg(hdev->dev, "Device is going to be hard-reset in %u sec unless being released\n",
hdev->device_release_watchdog_timeout_sec);
schedule_delayed_work(&hdev->device_release_watchdog_work.reset_work,
msecs_to_jiffies(hdev->device_release_watchdog_timeout_sec * 1000));
@@ -1939,37 +1973,27 @@ void hl_notifier_event_send_all(struct hl_device *hdev, u64 event_mask)
mutex_unlock(&hdev->fpriv_ctrl_list_lock);
}
-/*
- * hl_device_init - main initialization function for habanalabs device
- *
- * @hdev: pointer to habanalabs device structure
- *
- * Allocate an id for the device, do early initialization and then call the
- * ASIC specific initialization functions. Finally, create the cdev and the
- * Linux device to expose it to the user
- */
-int hl_device_init(struct hl_device *hdev, struct class *hclass)
+static int create_cdev(struct hl_device *hdev)
{
- int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt;
char *name;
- bool add_cdev_sysfs_on_err = false;
+ int rc;
hdev->cdev_idx = hdev->id / 2;
name = kasprintf(GFP_KERNEL, "hl%d", hdev->cdev_idx);
if (!name) {
rc = -ENOMEM;
- goto out_disabled;
+ goto out_err;
}
/* Initialize cdev and device structures */
- rc = device_init_cdev(hdev, hclass, hdev->id, &hl_ops, name,
+ rc = device_init_cdev(hdev, hdev->hclass, hdev->id, &hl_ops, name,
&hdev->cdev, &hdev->dev);
kfree(name);
if (rc)
- goto out_disabled;
+ goto out_err;
name = kasprintf(GFP_KERNEL, "hl_controlD%d", hdev->cdev_idx);
if (!name) {
@@ -1978,7 +2002,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
}
/* Initialize cdev and device structures for control device */
- rc = device_init_cdev(hdev, hclass, hdev->id_control, &hl_ctrl_ops,
+ rc = device_init_cdev(hdev, hdev->hclass, hdev->id_control, &hl_ctrl_ops,
name, &hdev->cdev_ctrl, &hdev->dev_ctrl);
kfree(name);
@@ -1986,10 +2010,36 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
if (rc)
goto free_dev;
+ return 0;
+
+free_dev:
+ put_device(hdev->dev);
+out_err:
+ return rc;
+}
+
+/*
+ * hl_device_init - main initialization function for habanalabs device
+ *
+ * @hdev: pointer to habanalabs device structure
+ *
+ * Allocate an id for the device, do early initialization and then call the
+ * ASIC specific initialization functions. Finally, create the cdev and the
+ * Linux device to expose it to the user
+ */
+int hl_device_init(struct hl_device *hdev)
+{
+ int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt;
+ bool add_cdev_sysfs_on_err = false;
+
+ rc = create_cdev(hdev);
+ if (rc)
+ goto out_disabled;
+
/* Initialize ASIC function pointers and perform early init */
rc = device_early_init(hdev);
if (rc)
- goto free_dev_ctrl;
+ goto free_dev;
user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count +
hdev->asic_prop.user_interrupt_count;
@@ -2241,9 +2291,8 @@ free_usr_intr_mem:
kfree(hdev->user_interrupt);
early_fini:
device_early_fini(hdev);
-free_dev_ctrl:
- put_device(hdev->dev_ctrl);
free_dev:
+ put_device(hdev->dev_ctrl);
put_device(hdev->dev);
out_disabled:
hdev->disabled = true;
@@ -2364,7 +2413,9 @@ void hl_device_fini(struct hl_device *hdev)
hl_cb_pool_fini(hdev);
/* Reset the H/W. It will be in idle state after this returns */
- hdev->asic_funcs->hw_fini(hdev, true, false);
+ rc = hdev->asic_funcs->hw_fini(hdev, true, false);
+ if (rc)
+ dev_err(hdev->dev, "hw_fini failed in device fini while removing device %d\n", rc);
hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
@@ -2566,3 +2617,49 @@ void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_
if (event_mask)
*event_mask |= HL_NOTIFIER_EVENT_PAGE_FAULT;
}
+
+static void hl_capture_hw_err(struct hl_device *hdev, u16 event_id)
+{
+ struct hw_err_info *info = &hdev->captured_err_info.hw_err;
+
+ /* Capture only the first HW err */
+ if (atomic_cmpxchg(&info->event_detected, 0, 1))
+ return;
+
+ info->event.timestamp = ktime_to_ns(ktime_get());
+ info->event.event_id = event_id;
+
+ info->event_info_available = true;
+}
+
+void hl_handle_critical_hw_err(struct hl_device *hdev, u16 event_id, u64 *event_mask)
+{
+ hl_capture_hw_err(hdev, event_id);
+
+ if (event_mask)
+ *event_mask |= HL_NOTIFIER_EVENT_CRITICL_HW_ERR;
+}
+
+static void hl_capture_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *fw_info)
+{
+ struct fw_err_info *info = &hdev->captured_err_info.fw_err;
+
+ /* Capture only the first FW error */
+ if (atomic_cmpxchg(&info->event_detected, 0, 1))
+ return;
+
+ info->event.timestamp = ktime_to_ns(ktime_get());
+ info->event.err_type = fw_info->err_type;
+ if (fw_info->err_type == HL_INFO_FW_REPORTED_ERR)
+ info->event.event_id = fw_info->event_id;
+
+ info->event_info_available = true;
+}
+
+void hl_handle_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *info)
+{
+ hl_capture_fw_err(hdev, info);
+
+ if (info->event_mask)
+ *info->event_mask |= HL_NOTIFIER_EVENT_CRITICL_FW_ERR;
+}
diff --git a/drivers/accel/habanalabs/common/firmware_if.c b/drivers/accel/habanalabs/common/firmware_if.c
index da892d8fb3d6..7ea611392f8c 100644
--- a/drivers/accel/habanalabs/common/firmware_if.c
+++ b/drivers/accel/habanalabs/common/firmware_if.c
@@ -3152,7 +3152,7 @@ int hl_fw_get_sec_attest_info(struct hl_device *hdev, struct cpucp_sec_attest_in
int hl_fw_send_generic_request(struct hl_device *hdev, enum hl_passthrough_type sub_opcode,
dma_addr_t buff, u32 *size)
{
- struct cpucp_packet pkt = {0};
+ struct cpucp_packet pkt = {};
u64 result;
int rc = 0;
diff --git a/drivers/accel/habanalabs/common/habanalabs.h b/drivers/accel/habanalabs/common/habanalabs.h
index fa05e76d3d21..a6f5c2152b0a 100644
--- a/drivers/accel/habanalabs/common/habanalabs.h
+++ b/drivers/accel/habanalabs/common/habanalabs.h
@@ -155,18 +155,12 @@ enum hl_mmu_enablement {
#define hl_asic_dma_alloc_coherent(hdev, size, dma_handle, flags) \
hl_asic_dma_alloc_coherent_caller(hdev, size, dma_handle, flags, __func__)
-#define hl_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle) \
- hl_cpu_accessible_dma_pool_alloc_caller(hdev, size, dma_handle, __func__)
-
#define hl_asic_dma_pool_zalloc(hdev, size, mem_flags, dma_handle) \
hl_asic_dma_pool_zalloc_caller(hdev, size, mem_flags, dma_handle, __func__)
#define hl_asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle) \
hl_asic_dma_free_coherent_caller(hdev, size, cpu_addr, dma_handle, __func__)
-#define hl_cpu_accessible_dma_pool_free(hdev, size, vaddr) \
- hl_cpu_accessible_dma_pool_free_caller(hdev, size, vaddr, __func__)
-
#define hl_asic_dma_pool_free(hdev, vaddr, dma_addr) \
hl_asic_dma_pool_free_caller(hdev, vaddr, dma_addr, __func__)
@@ -378,6 +372,7 @@ enum hl_cs_type {
CS_RESERVE_SIGNALS,
CS_UNRESERVE_SIGNALS,
CS_TYPE_ENGINE_CORE,
+ CS_TYPE_ENGINES,
CS_TYPE_FLUSH_PCI_HBW_WRITES,
};
@@ -592,6 +587,8 @@ struct hl_hints_range {
* @host_base_address: host physical start address for host DMA from device
* @host_end_address: host physical end address for host DMA from device
* @max_freq_value: current max clk frequency.
+ * @engine_core_interrupt_reg_addr: interrupt register address for engine core to use
+ * in order to raise events toward FW.
* @clk_pll_index: clock PLL index that specify which PLL determines the clock
* we display to the user
* @mmu_pgt_size: MMU page tables total size.
@@ -612,8 +609,8 @@ struct hl_hints_range {
* @cb_pool_cb_cnt: number of CBs in the CB pool.
* @cb_pool_cb_size: size of each CB in the CB pool.
* @decoder_enabled_mask: which decoders are enabled.
- * @decoder_binning_mask: which decoders are binned, 0 means usable and 1
- * means binned (at most one binned decoder per dcore).
+ * @decoder_binning_mask: which decoders are binned, 0 means usable and 1 means binned.
+ * @rotator_enabled_mask: which rotators are enabled.
* @edma_enabled_mask: which EDMAs are enabled.
* @edma_binning_mask: which EDMAs are binned, 0 means usable and 1 means
* binned (at most one binned DMA).
@@ -648,7 +645,8 @@ struct hl_hints_range {
* which the property supports_user_set_page_size is true
* (i.e. the DRAM supports multiple page sizes), otherwise
* it will shall be equal to dram_page_size.
- * @num_engine_cores: number of engine cpu cores
+ * @num_engine_cores: number of engine cpu cores.
+ * @max_num_of_engines: maximum number of all engines in the ASIC.
* @num_of_special_blocks: special_blocks array size.
* @glbl_err_cause_num: global err cause number.
* @hbw_flush_reg: register to read to generate HBW flush. value of 0 means HBW flush is
@@ -663,6 +661,8 @@ struct hl_hints_range {
* @first_available_cq: first available CQ for the user.
* @user_interrupt_count: number of user interrupts.
* @user_dec_intr_count: number of decoder interrupts exposed to user.
+ * @tpc_interrupt_id: interrupt id for TPC to use in order to raise events towards the host.
+ * @unexpected_user_error_interrupt_id: interrupt id used to indicate an unexpected user error.
* @cache_line_size: device cache line size.
* @server_type: Server type that the ASIC is currently installed in.
* The value is according to enum hl_server_type in uapi file.
@@ -698,6 +698,7 @@ struct hl_hints_range {
* @supports_user_set_page_size: true if user can set the allocation page size.
* @dma_mask: the dma mask to be set for this device
* @supports_advanced_cpucp_rc: true if new cpucp opcodes are supported.
+ * @supports_engine_modes: true if changing engines/engine_cores modes is supported.
*/
struct asic_fixed_properties {
struct hw_queue_properties *hw_queues_props;
@@ -739,6 +740,7 @@ struct asic_fixed_properties {
u64 host_base_address;
u64 host_end_address;
u64 max_freq_value;
+ u64 engine_core_interrupt_reg_addr;
u32 clk_pll_index;
u32 mmu_pgt_size;
u32 mmu_pte_size;
@@ -759,6 +761,7 @@ struct asic_fixed_properties {
u32 cb_pool_cb_size;
u32 decoder_enabled_mask;
u32 decoder_binning_mask;
+ u32 rotator_enabled_mask;
u32 edma_enabled_mask;
u32 edma_binning_mask;
u32 max_pending_cs;
@@ -775,6 +778,7 @@ struct asic_fixed_properties {
u32 xbar_edge_enabled_mask;
u32 device_mem_alloc_default_page_size;
u32 num_engine_cores;
+ u32 max_num_of_engines;
u32 num_of_special_blocks;
u32 glbl_err_cause_num;
u32 hbw_flush_reg;
@@ -788,6 +792,8 @@ struct asic_fixed_properties {
u16 first_available_cq[HL_MAX_DCORES];
u16 user_interrupt_count;
u16 user_dec_intr_count;
+ u16 tpc_interrupt_id;
+ u16 unexpected_user_error_interrupt_id;
u16 cache_line_size;
u16 server_type;
u8 completion_queues_count;
@@ -811,6 +817,7 @@ struct asic_fixed_properties {
u8 supports_user_set_page_size;
u8 dma_mask;
u8 supports_advanced_cpucp_rc;
+ u8 supports_engine_modes;
};
/**
@@ -1096,6 +1103,8 @@ struct hl_cq {
enum hl_user_interrupt_type {
HL_USR_INTERRUPT_CQ = 0,
HL_USR_INTERRUPT_DECODER,
+ HL_USR_INTERRUPT_TPC,
+ HL_USR_INTERRUPT_UNEXPECTED
};
/**
@@ -1104,6 +1113,7 @@ enum hl_user_interrupt_type {
* @type: user interrupt type
* @wait_list_head: head to the list of user threads pending on this interrupt
* @wait_list_lock: protects wait_list_head
+ * @timestamp: last timestamp taken upon interrupt
* @interrupt_id: msix interrupt id
*/
struct hl_user_interrupt {
@@ -1111,6 +1121,7 @@ struct hl_user_interrupt {
enum hl_user_interrupt_type type;
struct list_head wait_list_head;
spinlock_t wait_list_lock;
+ ktime_t timestamp;
u32 interrupt_id;
};
@@ -1562,6 +1573,7 @@ struct engines_data {
* @access_dev_mem: access device memory
* @set_dram_bar_base: set the base of the DRAM BAR
* @set_engine_cores: set a config command to engine cores
+ * @set_engines: set a config command to user engines
* @send_device_activity: indication to FW about device availability
* @set_dram_properties: set DRAM related properties.
* @set_binning_masks: set binning/enable masks for all relevant components.
@@ -1574,7 +1586,7 @@ struct hl_asic_funcs {
int (*sw_init)(struct hl_device *hdev);
int (*sw_fini)(struct hl_device *hdev);
int (*hw_init)(struct hl_device *hdev);
- void (*hw_fini)(struct hl_device *hdev, bool hard_reset, bool fw_reset);
+ int (*hw_fini)(struct hl_device *hdev, bool hard_reset, bool fw_reset);
void (*halt_engines)(struct hl_device *hdev, bool hard_reset, bool fw_reset);
int (*suspend)(struct hl_device *hdev);
int (*resume)(struct hl_device *hdev);
@@ -1701,6 +1713,8 @@ struct hl_asic_funcs {
u64 (*set_dram_bar_base)(struct hl_device *hdev, u64 addr);
int (*set_engine_cores)(struct hl_device *hdev, u32 *core_ids,
u32 num_cores, u32 core_command);
+ int (*set_engines)(struct hl_device *hdev, u32 *engine_ids,
+ u32 num_engines, u32 engine_command);
int (*send_device_activity)(struct hl_device *hdev, bool open);
int (*set_dram_properties)(struct hl_device *hdev);
int (*set_binning_masks)(struct hl_device *hdev);
@@ -1824,7 +1838,7 @@ struct hl_cs_outcome_store {
* @hpriv: pointer to the private (Kernel Driver) data of the process (fd).
* @hdev: pointer to the device structure.
* @refcount: reference counter for the context. Context is released only when
- * this hits 0l. It is incremented on CS and CS_WAIT.
+ * this hits 0. It is incremented on CS and CS_WAIT.
* @cs_pending: array of hl fence objects representing pending CS.
* @outcome_store: storage data structure used to remember outcomes of completed
* command submissions for a long time after CS id wraparound.
@@ -2318,7 +2332,7 @@ struct hl_debugfs_entry {
* @userptr_list: list of available userptrs (virtual memory chunk descriptor).
* @userptr_spinlock: protects userptr_list.
* @ctx_mem_hash_list: list of available contexts with MMU mappings.
- * @ctx_mem_hash_spinlock: protects cb_list.
+ * @ctx_mem_hash_mutex: protects list of available contexts with MMU mappings.
* @data_dma_blob_desc: data DMA descriptor of blob.
* @mon_dump_blob_desc: monitor dump descriptor of blob.
* @state_dump: data of the system states in case of a bad cs.
@@ -2349,7 +2363,7 @@ struct hl_dbg_device_entry {
struct list_head userptr_list;
spinlock_t userptr_spinlock;
struct list_head ctx_mem_hash_list;
- spinlock_t ctx_mem_hash_spinlock;
+ struct mutex ctx_mem_hash_mutex;
struct debugfs_blob_wrapper data_dma_blob_desc;
struct debugfs_blob_wrapper mon_dump_blob_desc;
char *state_dump[HL_STATE_DUMP_HIST_LEN];
@@ -2974,8 +2988,8 @@ struct cs_timeout_info {
* @cq_addr: the address of the current handled command buffer
* @cq_size: the size of the current handled command buffer
* @cb_addr_streams_len: num of streams - actual len of cb_addr_streams array.
- * should be equal to 1 incase of undefined opcode
- * in Upper-CP (specific stream) and equal to 4 incase
+ * should be equal to 1 in case of undefined opcode
+ * in Upper-CP (specific stream) and equal to 4 in case
* of undefined opcode in Lower-CP.
* @engine_id: engine-id that the error occurred on
* @stream_id: the stream id the error occurred on. In case the stream equals to
@@ -3032,17 +3046,55 @@ struct razwi_info {
};
/**
+ * struct hw_err_info - HW error information.
+ * @event: holds information on the event.
+ * @event_detected: if set as 1, then a HW event was discovered for the
+ * first time after the driver has finished booting-up.
+ * currently we assume that only fatal events (that require hard-reset) are
+ * reported so we don't care of the others that might follow it.
+ * so once changed to 1, it will remain that way.
+ * TODO: support multiple events.
+ * @event_info_available: indicates that a HW event info is now available.
+ */
+struct hw_err_info {
+ struct hl_info_hw_err_event event;
+ atomic_t event_detected;
+ bool event_info_available;
+};
+
+/**
+ * struct fw_err_info - FW error information.
+ * @event: holds information on the event.
+ * @event_detected: if set as 1, then a FW event was discovered for the
+ * first time after the driver has finished booting-up.
+ * currently we assume that only fatal events (that require hard-reset) are
+ * reported, so we don't care about the others that might follow it.
+ * so once changed to 1, it will remain that way.
+ * TODO: support multiple events.
+ * @event_info_available: indicates that a FW event info is now available.
+ */
+struct fw_err_info {
+ struct hl_info_fw_err_event event;
+ atomic_t event_detected;
+ bool event_info_available;
+};
+
+/**
* struct hl_error_info - holds information collected during an error.
* @cs_timeout: CS timeout error information.
* @razwi_info: RAZWI information.
* @undef_opcode: undefined opcode information.
* @page_fault_info: page fault information.
+ * @hw_err: (fatal) hardware error information.
+ * @fw_err: firmware error information.
*/
struct hl_error_info {
struct cs_timeout_info cs_timeout;
struct razwi_info razwi_info;
struct undefined_opcode_info undef_opcode;
struct page_fault_info page_fault_info;
+ struct hw_err_info hw_err;
+ struct fw_err_info fw_err;
};
/**
@@ -3090,6 +3142,7 @@ struct hl_reset_info {
* (required only for PCI address match mode)
* @pcie_bar: array of available PCIe bars virtual addresses.
* @rmmio: configuration area address on SRAM.
+ * @hclass: pointer to the habanalabs class.
* @cdev: related char device.
* @cdev_ctrl: char device for control operations only (INFO IOCTL)
* @dev: related kernel basic device structure.
@@ -3104,6 +3157,8 @@ struct hl_reset_info {
* @user_interrupt: array of hl_user_interrupt. upon the corresponding user
* interrupt, driver will monitor the list of fences
* registered to this interrupt.
+ * @tpc_interrupt: single TPC interrupt for all TPCs.
+ * @unexpected_error_interrupt: single interrupt for unexpected user error indication.
* @common_user_cq_interrupt: common user CQ interrupt for all user CQ interrupts.
* upon any user CQ interrupt, driver will monitor the
* list of fences registered to this common structure.
@@ -3199,6 +3254,7 @@ struct hl_reset_info {
* drams are binned-out
* @tpc_binning: contains mask of tpc engines that is received from the f/w which indicates which
* tpc engines are binned-out
+ * @dmabuf_export_cnt: number of dma-buf exports.
* @card_type: Various ASICs have several card types. This indicates the card
* type of the current device.
* @major: habanalabs kernel driver major.
@@ -3253,6 +3309,8 @@ struct hl_reset_info {
* @supports_mmu_prefetch: true if prefetch is supported, otherwise false.
* @reset_upon_device_release: reset the device when the user closes the file descriptor of the
* device.
+ * @supports_ctx_switch: true if a ctx switch is required upon first submission.
+ * @support_preboot_binning: true if we support reading binning info from preboot.
* @nic_ports_mask: Controls which NIC ports are enabled. Used only for testing.
* @fw_components: Controls which f/w components to load to the device. There are multiple f/w
* stages and sometimes we want to stop at a certain stage. Used only for testing.
@@ -3266,14 +3324,13 @@ struct hl_reset_info {
* Used only for testing.
* @heartbeat: Controls if we want to enable the heartbeat mechanism vs. the f/w, which verifies
* that the f/w is always alive. Used only for testing.
- * @supports_ctx_switch: true if a ctx switch is required upon first submission.
- * @support_preboot_binning: true if we support read binning info from preboot.
*/
struct hl_device {
struct pci_dev *pdev;
u64 pcie_bar_phys[HL_PCI_NUM_BARS];
void __iomem *pcie_bar[HL_PCI_NUM_BARS];
void __iomem *rmmio;
+ struct class *hclass;
struct cdev cdev;
struct cdev cdev_ctrl;
struct device *dev;
@@ -3286,6 +3343,8 @@ struct hl_device {
enum hl_asic_type asic_type;
struct hl_cq *completion_queue;
struct hl_user_interrupt *user_interrupt;
+ struct hl_user_interrupt tpc_interrupt;
+ struct hl_user_interrupt unexpected_error_interrupt;
struct hl_user_interrupt common_user_cq_interrupt;
struct hl_user_interrupt common_decoder_interrupt;
struct hl_cs **shadow_cs_queue;
@@ -3369,7 +3428,7 @@ struct hl_device {
u64 fw_comms_poll_interval_usec;
u64 dram_binning;
u64 tpc_binning;
-
+ atomic_t dmabuf_export_cnt;
enum cpucp_card_types card_type;
u32 major;
u32 high_pll;
@@ -3412,7 +3471,7 @@ struct hl_device {
u8 supports_ctx_switch;
u8 support_preboot_binning;
- /* Parameters for bring-up */
+ /* Parameters for bring-up to be upstreamed */
u64 nic_ports_mask;
u64 fw_components;
u8 mmu_enable;
@@ -3450,6 +3509,20 @@ struct hl_cs_encaps_sig_handle {
u32 count;
};
+/**
+ * struct hl_info_fw_err_info - firmware error information structure
+ * @err_type: The type of error detected (or reported).
+ * @event_mask: Pointer to the event mask to be modified with the detected error flag
+ * (can be NULL)
+ * @event_id: The id of the event that reported the error
+ * (applicable when err_type is HL_INFO_FW_REPORTED_ERR).
+ */
+struct hl_info_fw_err_info {
+ enum hl_info_fw_err_type err_type;
+ u64 *event_mask;
+ u16 event_id;
+};
+
/*
* IOCTLs
*/
@@ -3474,6 +3547,10 @@ struct hl_ioctl_desc {
hl_ioctl_t *func;
};
+static inline bool hl_is_fw_ver_below_1_9(struct hl_device *hdev)
+{
+ return (hdev->fw_major_version < 42);
+}
/*
* Kernel module functions that can be accessed by entire module
@@ -3537,14 +3614,12 @@ static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
}
uint64_t hl_set_dram_bar_default(struct hl_device *hdev, u64 addr);
+void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle);
+void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr);
void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
gfp_t flag, const char *caller);
void hl_asic_dma_free_coherent_caller(struct hl_device *hdev, size_t size, void *cpu_addr,
dma_addr_t dma_handle, const char *caller);
-void *hl_cpu_accessible_dma_pool_alloc_caller(struct hl_device *hdev, size_t size,
- dma_addr_t *dma_handle, const char *caller);
-void hl_cpu_accessible_dma_pool_free_caller(struct hl_device *hdev, size_t size, void *vaddr,
- const char *caller);
void *hl_asic_dma_pool_zalloc_caller(struct hl_device *hdev, size_t size, gfp_t mem_flags,
dma_addr_t *dma_handle, const char *caller);
void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr,
@@ -3591,7 +3666,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg);
irqreturn_t hl_irq_handler_eq(int irq, void *arg);
irqreturn_t hl_irq_handler_dec_abnrm(int irq, void *arg);
irqreturn_t hl_irq_handler_user_interrupt(int irq, void *arg);
-irqreturn_t hl_irq_handler_default(int irq, void *arg);
+irqreturn_t hl_irq_user_interrupt_thread_handler(int irq, void *arg);
u32 hl_cq_inc_ptr(u32 ptr);
int hl_asid_init(struct hl_device *hdev);
@@ -3612,7 +3687,7 @@ int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr);
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr);
-int hl_device_init(struct hl_device *hdev, struct class *hclass);
+int hl_device_init(struct hl_device *hdev);
void hl_device_fini(struct hl_device *hdev);
int hl_device_suspend(struct hl_device *hdev);
int hl_device_resume(struct hl_device *hdev);
@@ -3662,6 +3737,7 @@ bool cs_needs_timeout(struct hl_cs *cs);
bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs);
struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq);
void hl_multi_cs_completion_init(struct hl_device *hdev);
+u32 hl_get_active_cs_num(struct hl_device *hdev);
void goya_set_asic_funcs(struct hl_device *hdev);
void gaudi_set_asic_funcs(struct hl_device *hdev);
@@ -3861,6 +3937,7 @@ const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type);
void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg);
void hl_mem_mgr_fini(struct hl_mem_mgr *mmg);
+void hl_mem_mgr_idr_destroy(struct hl_mem_mgr *mmg);
int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
void *args);
struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg,
@@ -3879,6 +3956,8 @@ void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_o
void hl_capture_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu);
void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu,
u64 *event_mask);
+void hl_handle_critical_hw_err(struct hl_device *hdev, u16 event_id, u64 *event_mask);
+void hl_handle_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *info);
#ifdef CONFIG_DEBUG_FS
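The hw_err_info/fw_err_info documentation above describes latch-once semantics: the first fatal event is recorded and later ones are ignored until the flag is cleared again. Below is a minimal standalone C sketch of that pattern; the struct and function names are illustrative stand-ins, not the driver's, and C11 atomics stand in for the kernel's atomic_t.

/*
 * Illustrative userspace sketch (not driver code): the "latch the first
 * event only" pattern described above. A compare-and-swap on the detected
 * flag lets exactly one reporter fill in the event payload; later events
 * are dropped until the flag is cleared again (e.g. on device open).
 */
#include <stdatomic.h>
#include <stdio.h>

struct demo_err_info {
	atomic_int event_detected;	/* 0 -> nothing captured yet */
	int event_id;			/* payload, valid once detected */
	_Bool event_info_available;
};

static struct demo_err_info info;

static void report_event(int event_id)
{
	int expected = 0;

	/* only the first reporter wins; others see expected != 0 */
	if (!atomic_compare_exchange_strong(&info.event_detected, &expected, 1))
		return;

	info.event_id = event_id;
	info.event_info_available = 1;
}

int main(void)
{
	report_event(17);
	report_event(99);	/* ignored: an event is already latched */

	printf("captured event id %d\n", info.event_id);	/* prints 17 */
	return 0;
}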
diff --git a/drivers/accel/habanalabs/common/habanalabs_drv.c b/drivers/accel/habanalabs/common/habanalabs_drv.c
index 03dae57dc838..a4b3f50f1cba 100644
--- a/drivers/accel/habanalabs/common/habanalabs_drv.c
+++ b/drivers/accel/habanalabs/common/habanalabs_drv.c
@@ -12,7 +12,6 @@
#include "../include/hw_ip/pci/pci_general.h"
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/module.h>
#define CREATE_TRACE_POINTS
@@ -221,12 +220,9 @@ int hl_device_open(struct inode *inode, struct file *filp)
hl_debugfs_add_file(hpriv);
+ memset(&hdev->captured_err_info, 0, sizeof(hdev->captured_err_info));
atomic_set(&hdev->captured_err_info.cs_timeout.write_enable, 1);
- atomic_set(&hdev->captured_err_info.razwi_info.razwi_detected, 0);
- atomic_set(&hdev->captured_err_info.page_fault_info.page_fault_detected, 0);
hdev->captured_err_info.undef_opcode.write_enable = true;
- hdev->captured_err_info.razwi_info.razwi_info_available = false;
- hdev->captured_err_info.page_fault_info.page_fault_info_available = false;
hdev->open_counter++;
hdev->last_successful_open_jif = jiffies;
@@ -237,6 +233,7 @@ int hl_device_open(struct inode *inode, struct file *filp)
out_err:
mutex_unlock(&hdev->fpriv_list_lock);
hl_mem_mgr_fini(&hpriv->mem_mgr);
+ hl_mem_mgr_idr_destroy(&hpriv->mem_mgr);
hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
filp->private_data = NULL;
mutex_destroy(&hpriv->ctx_lock);
@@ -324,6 +321,7 @@ static void copy_kernel_module_params_to_device(struct hl_device *hdev)
hdev->asic_prop.fw_security_enabled = is_asic_secured(hdev->asic_type);
hdev->major = hl_major;
+ hdev->hclass = hl_class;
hdev->memory_scrub = memory_scrub;
hdev->reset_on_lockup = reset_on_lockup;
hdev->boot_error_status_mask = boot_error_status_mask;
@@ -550,9 +548,7 @@ static int hl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, hdev);
- pci_enable_pcie_error_reporting(pdev);
-
- rc = hl_device_init(hdev, hl_class);
+ rc = hl_device_init(hdev);
if (rc) {
dev_err(&pdev->dev, "Fatal error during habanalabs device init\n");
rc = -ENODEV;
@@ -562,7 +558,6 @@ static int hl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
disable_device:
- pci_disable_pcie_error_reporting(pdev);
pci_set_drvdata(pdev, NULL);
destroy_hdev(hdev);
@@ -585,7 +580,6 @@ static void hl_pci_remove(struct pci_dev *pdev)
return;
hl_device_fini(hdev);
- pci_disable_pcie_error_reporting(pdev);
pci_set_drvdata(pdev, NULL);
destroy_hdev(hdev);
}
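The hl_device_open() hunk above folds several per-field resets into a single memset() over captured_err_info. A small hedged sketch of why that works: zeroing the enclosing struct resets every member, including ones added later such as hw_err and fw_err. The struct below is a made-up stand-in, not the driver's layout.

/*
 * Hedged sketch (not driver code): one memset() over the containing
 * struct resets all members, so the removed per-field assignments are
 * covered, and newly added members are covered automatically.
 */
#include <assert.h>
#include <string.h>

struct demo_captured_err_info {
	int cs_timeout_write_enable;
	int razwi_detected;
	int page_fault_detected;
	int hw_err_detected;	/* newly added member, covered for free */
};

int main(void)
{
	struct demo_captured_err_info err = {
		.cs_timeout_write_enable = 1,
		.razwi_detected = 1,
		.page_fault_detected = 1,
		.hw_err_detected = 1,
	};

	memset(&err, 0, sizeof(err));

	assert(err.razwi_detected == 0 && err.hw_err_detected == 0);
	return 0;
}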
diff --git a/drivers/accel/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
index 5005e6fca691..203ee857810c 100644
--- a/drivers/accel/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
@@ -102,11 +102,15 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.mme_master_slave_mode = prop->mme_master_slave_mode;
hw_ip.first_available_interrupt_id = prop->first_available_user_interrupt;
hw_ip.number_of_user_interrupts = prop->user_interrupt_count;
+ hw_ip.tpc_interrupt_id = prop->tpc_interrupt_id;
hw_ip.edma_enabled_mask = prop->edma_enabled_mask;
hw_ip.server_type = prop->server_type;
hw_ip.security_enabled = prop->fw_security_enabled;
hw_ip.revision_id = hdev->pdev->revision;
+ hw_ip.rotator_enabled_mask = prop->rotator_enabled_mask;
+ hw_ip.engine_core_interrupt_reg_addr = prop->engine_core_interrupt_reg_addr;
+ hw_ip.reserved_dram_size = dram_kmd_size;
return copy_to_user(out, &hw_ip,
min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0;
@@ -830,6 +834,50 @@ static int user_mappings_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
return copy_to_user(out, pgf_info->user_mappings, actual_size) ? -EFAULT : 0;
}
+static int hw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
+ struct hl_device *hdev = hpriv->hdev;
+ u32 user_buf_size = args->return_size;
+ struct hw_err_info *info;
+ int rc;
+
+ if ((!user_buf_size) || (!user_buf))
+ return -EINVAL;
+
+ if (user_buf_size < sizeof(struct hl_info_hw_err_event))
+ return -ENOMEM;
+
+ info = &hdev->captured_err_info.hw_err;
+ if (!info->event_info_available)
+ return -ENOENT;
+
+ rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_hw_err_event));
+ return rc ? -EFAULT : 0;
+}
+
+static int fw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
+ struct hl_device *hdev = hpriv->hdev;
+ u32 user_buf_size = args->return_size;
+ struct fw_err_info *info;
+ int rc;
+
+ if ((!user_buf_size) || (!user_buf))
+ return -EINVAL;
+
+ if (user_buf_size < sizeof(struct hl_info_fw_err_event))
+ return -ENOMEM;
+
+ info = &hdev->captured_err_info.fw_err;
+ if (!info->event_info_available)
+ return -ENOENT;
+
+ rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_fw_err_event));
+ return rc ? -EFAULT : 0;
+}
+
static int send_fw_generic_request(struct hl_device *hdev, struct hl_info_args *info_args)
{
void __user *buff = (void __user *) (uintptr_t) info_args->return_pointer;
@@ -950,6 +998,14 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_UNREGISTER_EVENTFD:
return eventfd_unregister(hpriv, args);
+ case HL_INFO_HW_ERR_EVENT:
+ return hw_err_info(hpriv, args);
+
+ case HL_INFO_FW_ERR_EVENT:
+ return fw_err_info(hpriv, args);
+
+ case HL_INFO_DRAM_USAGE:
+ return dram_usage_info(hpriv, args);
default:
break;
}
@@ -962,10 +1018,6 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
}
switch (args->op) {
- case HL_INFO_DRAM_USAGE:
- rc = dram_usage_info(hpriv, args);
- break;
-
case HL_INFO_HW_IDLE:
rc = hw_idle(hdev, args);
break;
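The two new INFO ioctl ops above return the captured HW/FW error event to userspace, or -ENOENT when nothing has been captured yet. A hedged userspace sketch of querying HL_INFO_HW_ERR_EVENT follows; the header include path and device node are assumptions for illustration, since both have moved across kernel releases.

/*
 * Hedged userspace sketch: querying the new HL_INFO_HW_ERR_EVENT op.
 * The header path and device node below are assumptions; error handling
 * is intentionally minimal.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <drm/habanalabs_accel.h>	/* assumed install path of the uapi header */

int main(void)
{
	struct hl_info_hw_err_event event;
	struct hl_info_args args;
	int fd, rc;

	fd = open("/dev/accel/accel0", O_RDWR);	/* assumed device node */
	if (fd < 0)
		return 1;

	memset(&args, 0, sizeof(args));
	memset(&event, 0, sizeof(event));
	args.op = HL_INFO_HW_ERR_EVENT;
	args.return_pointer = (uintptr_t)&event;
	args.return_size = sizeof(event);

	rc = ioctl(fd, HL_IOCTL_INFO, &args);
	if (rc && errno == ENOENT)
		printf("no HW error captured since last open\n");
	else if (!rc)
		printf("a HW error event was captured\n");

	close(fd);
	return 0;
}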
diff --git a/drivers/accel/habanalabs/common/irq.c b/drivers/accel/habanalabs/common/irq.c
index 04844e843a7b..fab1abc5c910 100644
--- a/drivers/accel/habanalabs/common/irq.c
+++ b/drivers/accel/habanalabs/common/irq.c
@@ -280,7 +280,6 @@ static void handle_user_interrupt(struct hl_device *hdev, struct hl_user_interru
struct list_head *ts_reg_free_list_head = NULL;
struct timestamp_reg_work_obj *job;
bool reg_node_handle_fail = false;
- ktime_t now = ktime_get();
int rc;
/* For registration nodes:
@@ -303,13 +302,13 @@ static void handle_user_interrupt(struct hl_device *hdev, struct hl_user_interru
if (pend->ts_reg_info.buf) {
if (!reg_node_handle_fail) {
rc = handle_registration_node(hdev, pend,
- &ts_reg_free_list_head, now);
+ &ts_reg_free_list_head, intr->timestamp);
if (rc)
reg_node_handle_fail = true;
}
} else {
/* Handle wait target value node */
- pend->fence.timestamp = now;
+ pend->fence.timestamp = intr->timestamp;
complete_all(&pend->fence.completion);
}
}
@@ -326,6 +325,26 @@ static void handle_user_interrupt(struct hl_device *hdev, struct hl_user_interru
}
}
+static void handle_tpc_interrupt(struct hl_device *hdev)
+{
+ u64 event_mask;
+ u32 flags;
+
+ event_mask = HL_NOTIFIER_EVENT_TPC_ASSERT |
+ HL_NOTIFIER_EVENT_USER_ENGINE_ERR |
+ HL_NOTIFIER_EVENT_DEVICE_RESET;
+
+ flags = HL_DRV_RESET_DELAY;
+
+ dev_err_ratelimited(hdev->dev, "Received TPC assert\n");
+ hl_device_cond_reset(hdev, flags, event_mask);
+}
+
+static void handle_unexpected_user_interrupt(struct hl_device *hdev)
+{
+ dev_err_ratelimited(hdev->dev, "Received unexpected user error interrupt\n");
+}
+
/**
* hl_irq_handler_user_interrupt - irq handler for user interrupts
*
@@ -336,6 +355,23 @@ static void handle_user_interrupt(struct hl_device *hdev, struct hl_user_interru
irqreturn_t hl_irq_handler_user_interrupt(int irq, void *arg)
{
struct hl_user_interrupt *user_int = arg;
+
+ user_int->timestamp = ktime_get();
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * hl_irq_user_interrupt_thread_handler - irq thread handler for user interrupts.
+ * This function is invoked by threaded irq mechanism
+ *
+ * @irq: irq number
+ * @arg: pointer to user interrupt structure
+ *
+ */
+irqreturn_t hl_irq_user_interrupt_thread_handler(int irq, void *arg)
+{
+ struct hl_user_interrupt *user_int = arg;
struct hl_device *hdev = user_int->hdev;
switch (user_int->type) {
@@ -351,6 +387,12 @@ irqreturn_t hl_irq_handler_user_interrupt(int irq, void *arg)
/* Handle decoder interrupt registered on this specific irq */
handle_user_interrupt(hdev, user_int);
break;
+ case HL_USR_INTERRUPT_TPC:
+ handle_tpc_interrupt(hdev);
+ break;
+ case HL_USR_INTERRUPT_UNEXPECTED:
+ handle_unexpected_user_interrupt(hdev);
+ break;
default:
break;
}
@@ -359,24 +401,6 @@ irqreturn_t hl_irq_handler_user_interrupt(int irq, void *arg)
}
/**
- * hl_irq_handler_default - default irq handler
- *
- * @irq: irq number
- * @arg: pointer to user interrupt structure
- *
- */
-irqreturn_t hl_irq_handler_default(int irq, void *arg)
-{
- struct hl_user_interrupt *user_interrupt = arg;
- struct hl_device *hdev = user_interrupt->hdev;
- u32 interrupt_id = user_interrupt->interrupt_id;
-
- dev_err(hdev->dev, "got invalid user interrupt %u", interrupt_id);
-
- return IRQ_HANDLED;
-}
-
-/**
* hl_irq_handler_eq - irq handler for event queue
*
* @irq: irq number
@@ -405,11 +429,10 @@ irqreturn_t hl_irq_handler_eq(int irq, void *arg)
cur_eqe_index = FIELD_GET(EQ_CTL_INDEX_MASK, cur_eqe);
if ((hdev->event_queue.check_eqe_index) &&
- (((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK)
- != cur_eqe_index)) {
+ (((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK) != cur_eqe_index)) {
dev_dbg(hdev->dev,
- "EQE 0x%x in queue is ready but index does not match %d!=%d",
- eq_base[eq->ci].hdr.ctl,
+ "EQE %#x in queue is ready but index does not match %d!=%d",
+ cur_eqe,
((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK),
cur_eqe_index);
break;
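The irq.c hunks above split user-interrupt handling into a hard handler that only stamps the arrival time and a threaded handler that consumes it. The pthread-based analogy below (plain userspace, not the kernel threaded-IRQ API) shows why stamping in the thread instead would skew fence timestamps by the scheduling latency.

/*
 * Hedged userspace analogy: the arrival timestamp is captured by the
 * "hard handler" and merely consumed by the "thread handler". Stamping
 * in the thread would add the (possibly long) scheduling delay.
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct fake_interrupt {
	struct timespec timestamp;	/* set by the "hard handler" */
};

static long usec_since(const struct timespec *t)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - t->tv_sec) * 1000000L +
	       (now.tv_nsec - t->tv_nsec) / 1000L;
}

static void *thread_handler(void *arg)
{
	struct fake_interrupt *intr = arg;

	printf("thread ran %ld us after the event; the early stamp avoids that skew\n",
	       usec_since(&intr->timestamp));
	return NULL;
}

int main(void)
{
	struct fake_interrupt intr;
	pthread_t thread;

	/* "hard handler": record the arrival time, then wake the thread */
	clock_gettime(CLOCK_MONOTONIC, &intr.timestamp);

	usleep(2000);	/* pretend the thread was scheduled late */

	pthread_create(&thread, NULL, thread_handler, &intr);
	pthread_join(thread, NULL);
	return 0;
}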
diff --git a/drivers/accel/habanalabs/common/memory.c b/drivers/accel/habanalabs/common/memory.c
index 761a47e89b00..17b79d717896 100644
--- a/drivers/accel/habanalabs/common/memory.c
+++ b/drivers/accel/habanalabs/common/memory.c
@@ -235,10 +235,8 @@ static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
}
rc = hl_pin_host_memory(hdev, addr, size, userptr);
- if (rc) {
- dev_err(hdev->dev, "Failed to pin host memory\n");
+ if (rc)
goto pin_err;
- }
userptr->dma_mapped = true;
userptr->dir = DMA_BIDIRECTIONAL;
@@ -1097,10 +1095,8 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device
huge_page_size = hdev->asic_prop.pmmu_huge.page_size;
rc = dma_map_host_va(hdev, addr, size, &userptr);
- if (rc) {
- dev_err(hdev->dev, "failed to get userptr from va\n");
+ if (rc)
return rc;
- }
rc = init_phys_pg_pack_from_userptr(ctx, userptr,
&phys_pg_pack, false);
@@ -1270,6 +1266,18 @@ init_page_pack_err:
return rc;
}
+/* Should be called while the context's mem_hash_lock is taken */
+static struct hl_vm_hash_node *get_vm_hash_node_locked(struct hl_ctx *ctx, u64 vaddr)
+{
+ struct hl_vm_hash_node *hnode;
+
+ hash_for_each_possible(ctx->mem_hash, hnode, node, vaddr)
+ if (vaddr == hnode->vaddr)
+ return hnode;
+
+ return NULL;
+}
+
/**
* unmap_device_va() - unmap the given device virtual address.
* @ctx: pointer to the context structure.
@@ -1285,10 +1293,10 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
{
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
u64 vaddr = args->unmap.device_virt_addr;
- struct hl_vm_hash_node *hnode = NULL;
struct asic_fixed_properties *prop;
struct hl_device *hdev = ctx->hdev;
struct hl_userptr *userptr = NULL;
+ struct hl_vm_hash_node *hnode;
struct hl_va_range *va_range;
enum vm_type *vm_type;
bool is_userptr;
@@ -1298,15 +1306,10 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
/* protect from double entrance */
mutex_lock(&ctx->mem_hash_lock);
- hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
- if (vaddr == hnode->vaddr)
- break;
-
+ hnode = get_vm_hash_node_locked(ctx, vaddr);
if (!hnode) {
mutex_unlock(&ctx->mem_hash_lock);
- dev_err(hdev->dev,
- "unmap failed, no mem hnode for vaddr 0x%llx\n",
- vaddr);
+ dev_err(hdev->dev, "unmap failed, no mem hnode for vaddr 0x%llx\n", vaddr);
return -EINVAL;
}
@@ -1779,6 +1782,44 @@ static void hl_unmap_dmabuf(struct dma_buf_attachment *attachment,
kfree(sgt);
}
+static struct hl_vm_hash_node *memhash_node_export_get(struct hl_ctx *ctx, u64 addr)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_vm_hash_node *hnode;
+
+ /* get the memory handle */
+ mutex_lock(&ctx->mem_hash_lock);
+ hnode = get_vm_hash_node_locked(ctx, addr);
+ if (!hnode) {
+ mutex_unlock(&ctx->mem_hash_lock);
+ dev_dbg(hdev->dev, "map address %#llx not found\n", addr);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (upper_32_bits(hnode->handle)) {
+ mutex_unlock(&ctx->mem_hash_lock);
+ dev_dbg(hdev->dev, "invalid handle %#llx for map address %#llx\n",
+ hnode->handle, addr);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * node found, increase export count so this memory cannot be unmapped
+ * and the hash node cannot be deleted.
+ */
+ hnode->export_cnt++;
+ mutex_unlock(&ctx->mem_hash_lock);
+
+ return hnode;
+}
+
+static void memhash_node_export_put(struct hl_ctx *ctx, struct hl_vm_hash_node *hnode)
+{
+ mutex_lock(&ctx->mem_hash_lock);
+ hnode->export_cnt--;
+ mutex_unlock(&ctx->mem_hash_lock);
+}
+
static void hl_release_dmabuf(struct dma_buf *dmabuf)
{
struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
@@ -1789,13 +1830,15 @@ static void hl_release_dmabuf(struct dma_buf *dmabuf)
ctx = hl_dmabuf->ctx;
- if (hl_dmabuf->memhash_hnode) {
- mutex_lock(&ctx->mem_hash_lock);
- hl_dmabuf->memhash_hnode->export_cnt--;
- mutex_unlock(&ctx->mem_hash_lock);
- }
+ if (hl_dmabuf->memhash_hnode)
+ memhash_node_export_put(ctx, hl_dmabuf->memhash_hnode);
+ atomic_dec(&ctx->hdev->dmabuf_export_cnt);
hl_ctx_put(ctx);
+
+ /* Paired with get_file() in export_dmabuf() */
+ fput(ctx->hpriv->filp);
+
kfree(hl_dmabuf);
}
@@ -1834,6 +1877,13 @@ static int export_dmabuf(struct hl_ctx *ctx,
hl_dmabuf->ctx = ctx;
hl_ctx_get(hl_dmabuf->ctx);
+ atomic_inc(&ctx->hdev->dmabuf_export_cnt);
+
+ /* Get the compute device file to enforce release order, such that all exported dma-bufs are
+ * released first and only then the compute device.
+ * Paired with fput() in hl_release_dmabuf().
+ */
+ get_file(ctx->hpriv->filp);
*dmabuf_fd = fd;
@@ -1933,47 +1983,6 @@ static int validate_export_params(struct hl_device *hdev, u64 device_addr, u64 s
return 0;
}
-static struct hl_vm_hash_node *memhash_node_export_get(struct hl_ctx *ctx, u64 addr)
-{
- struct hl_device *hdev = ctx->hdev;
- struct hl_vm_hash_node *hnode;
-
- /* get the memory handle */
- mutex_lock(&ctx->mem_hash_lock);
- hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)addr)
- if (addr == hnode->vaddr)
- break;
-
- if (!hnode) {
- mutex_unlock(&ctx->mem_hash_lock);
- dev_dbg(hdev->dev, "map address %#llx not found\n", addr);
- return ERR_PTR(-EINVAL);
- }
-
- if (upper_32_bits(hnode->handle)) {
- mutex_unlock(&ctx->mem_hash_lock);
- dev_dbg(hdev->dev, "invalid handle %#llx for map address %#llx\n",
- hnode->handle, addr);
- return ERR_PTR(-EINVAL);
- }
-
- /*
- * node found, increase export count so this memory cannot be unmapped
- * and the hash node cannot be deleted.
- */
- hnode->export_cnt++;
- mutex_unlock(&ctx->mem_hash_lock);
-
- return hnode;
-}
-
-static void memhash_node_export_put(struct hl_ctx *ctx, struct hl_vm_hash_node *hnode)
-{
- mutex_lock(&ctx->mem_hash_lock);
- hnode->export_cnt--;
- mutex_unlock(&ctx->mem_hash_lock);
-}
-
static struct hl_vm_phys_pg_pack *get_phys_pg_pack_from_hash_node(struct hl_device *hdev,
struct hl_vm_hash_node *hnode)
{
@@ -2221,11 +2230,11 @@ static struct hl_mmap_mem_buf_behavior hl_ts_behavior = {
* allocate_timestamps_buffers() - allocate timestamps buffers
* This function will allocate ts buffer that will later on be mapped to the user
* in order to be able to read the timestamp.
- * in additon it'll allocate an extra buffer for registration management.
+ * in addition it'll allocate an extra buffer for registration management.
* since we cannot fail during registration for out-of-memory situation, so
* we'll prepare a pool which will be used as user interrupt nodes and instead
* of dynamically allocating nodes while registration we'll pick the node from
- * this pool. in addtion it'll add node to the mapping hash which will be used
+ * this pool. in addition it'll add node to the mapping hash which will be used
* to map user ts buffer to the internal kernel ts buffer.
* @hpriv: pointer to the private data of the fd
* @args: ioctl input
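The export_dmabuf()/hl_release_dmabuf() hunks above pair get_file() with fput() and bump export_cnt so the compute device file cannot be torn down while an exported dma-buf is still alive. A hedged plain-C sketch of that lifetime rule follows; names are illustrative stand-ins, not the driver's.

/*
 * Hedged sketch: every exported object holds a reference on its parent,
 * so the parent's real teardown can only run after the last export is
 * released.
 */
#include <assert.h>
#include <stdio.h>

struct parent {
	int refcount;
	int torn_down;
};

static void parent_put(struct parent *p)
{
	if (--p->refcount == 0) {
		p->torn_down = 1;	/* real teardown happens here */
		printf("parent released\n");
	}
}

struct export {
	struct parent *owner;
};

static void export_create(struct export *e, struct parent *p)
{
	e->owner = p;
	p->refcount++;		/* paired with the put in export_release() */
}

static void export_release(struct export *e)
{
	parent_put(e->owner);
}

int main(void)
{
	struct parent dev = { .refcount = 1 };	/* the open file itself */
	struct export buf;

	export_create(&buf, &dev);

	parent_put(&dev);	/* user closes the device file */
	assert(!dev.torn_down);	/* still pinned by the export */

	export_release(&buf);	/* last export gone -> teardown runs */
	assert(dev.torn_down);
	return 0;
}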
diff --git a/drivers/accel/habanalabs/common/memory_mgr.c b/drivers/accel/habanalabs/common/memory_mgr.c
index 0f2759e26547..c4d84df355b0 100644
--- a/drivers/accel/habanalabs/common/memory_mgr.c
+++ b/drivers/accel/habanalabs/common/memory_mgr.c
@@ -275,7 +275,7 @@ int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
if (atomic_cmpxchg(&buf->mmap, 0, 1)) {
dev_err(mmg->dev,
- "%s, Memory mmap failed, already mmaped to user\n",
+ "%s, Memory mmap failed, already mapped to user\n",
buf->behavior->topic);
rc = -EINVAL;
goto put_mem;
@@ -341,8 +341,19 @@ void hl_mem_mgr_fini(struct hl_mem_mgr *mmg)
"%s: Buff handle %u for CTX is still alive\n",
topic, id);
}
+}
- /* TODO: can it happen that some buffer is still in use at this point? */
+/**
+ * hl_mem_mgr_idr_destroy() - destroy memory manager IDR.
+ * @mmg: parent unified memory manager
+ *
+ * Destroy the memory manager IDR.
+ * Shall be called when IDR is empty and no memory buffers are in use.
+ */
+void hl_mem_mgr_idr_destroy(struct hl_mem_mgr *mmg)
+{
+ if (!idr_is_empty(&mmg->handles))
+ dev_crit(mmg->dev, "memory manager IDR is destroyed while it is not empty!\n");
idr_destroy(&mmg->handles);
}
diff --git a/drivers/accel/habanalabs/common/mmu/mmu.c b/drivers/accel/habanalabs/common/mmu/mmu.c
index a42ae8bc61e8..17581b1bcc77 100644
--- a/drivers/accel/habanalabs/common/mmu/mmu.c
+++ b/drivers/accel/habanalabs/common/mmu/mmu.c
@@ -540,8 +540,8 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
u32 page_off;
/*
- * Bit arithmetics cannot be used for non power of two page
- * sizes. In addition, since bit arithmetics is not used,
+ * Bit arithmetic cannot be used for non power of two page
+ * sizes. In addition, since bit arithmetic is not used,
* we cannot ignore dram base. All that shall be considered.
*/
@@ -757,7 +757,7 @@ u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
* @mmu_prop: MMU properties.
* @hop_idx: HOP index.
* @hop_addr: HOP address.
- * @virt_addr: virtual address fro the translation.
+ * @virt_addr: virtual address for the translation.
*
* @return the matching PTE value on success, otherwise U64_MAX.
*/
diff --git a/drivers/accel/habanalabs/common/security.c b/drivers/accel/habanalabs/common/security.c
index 5f03ade07ead..297e6e44fd0c 100644
--- a/drivers/accel/habanalabs/common/security.c
+++ b/drivers/accel/habanalabs/common/security.c
@@ -502,7 +502,7 @@ free_glbl_sec:
int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
u32 num_instances, u32 instance_offset,
const u32 pb_blocks[], u32 blocks_array_size,
- const struct range *regs_range_array, u32 regs_range_array_size)
+ const struct range *user_regs_range_array, u32 user_regs_range_array_size)
{
int i;
struct hl_block_glbl_sec *glbl_sec;
@@ -514,8 +514,8 @@ int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
return -ENOMEM;
hl_secure_block(hdev, glbl_sec, blocks_array_size);
- hl_unsecure_registers_range(hdev, regs_range_array,
- regs_range_array_size, 0, pb_blocks, glbl_sec,
+ hl_unsecure_registers_range(hdev, user_regs_range_array,
+ user_regs_range_array_size, 0, pb_blocks, glbl_sec,
blocks_array_size);
/* Fill all blocks with the same configuration */
diff --git a/drivers/accel/habanalabs/common/security.h b/drivers/accel/habanalabs/common/security.h
index 234b4a6ed8bc..d7a3b3e82ea4 100644
--- a/drivers/accel/habanalabs/common/security.h
+++ b/drivers/accel/habanalabs/common/security.h
@@ -10,7 +10,7 @@
#include <linux/io-64-nonatomic-lo-hi.h>
-extern struct hl_device *hdev;
+struct hl_device;
/* special blocks */
#define HL_MAX_NUM_OF_GLBL_ERR_CAUSE 10
diff --git a/drivers/accel/habanalabs/gaudi/gaudi.c b/drivers/accel/habanalabs/gaudi/gaudi.c
index bb858b94e1e8..08a4b1cf2b42 100644
--- a/drivers/accel/habanalabs/gaudi/gaudi.c
+++ b/drivers/accel/habanalabs/gaudi/gaudi.c
@@ -656,6 +656,7 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->cfg_size = CFG_SIZE;
prop->max_asid = MAX_ASID;
prop->num_of_events = GAUDI_EVENT_SIZE;
+ prop->max_num_of_engines = GAUDI_ENGINE_ID_SIZE;
prop->tpc_enabled_mask = TPC_ENABLED_MASK;
set_default_power_values(hdev);
@@ -679,6 +680,7 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
(num_sync_stream_queues * HL_RSVD_MONS);
prop->first_available_user_interrupt = USHRT_MAX;
+ prop->tpc_interrupt_id = USHRT_MAX;
for (i = 0 ; i < HL_MAX_DCORES ; i++)
prop->first_available_cq[i] = USHRT_MAX;
@@ -867,13 +869,18 @@ pci_init:
rc = hl_fw_read_preboot_status(hdev);
if (rc) {
if (hdev->reset_on_preboot_fail)
+ /* we are already on the failure path, so don't check if hw_fini fails. */
hdev->asic_funcs->hw_fini(hdev, true, false);
goto pci_fini;
}
if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
dev_dbg(hdev->dev, "H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true, false);
+ rc = hdev->asic_funcs->hw_fini(hdev, true, false);
+ if (rc) {
+ dev_err(hdev->dev, "failed to reset HW in dirty state (%d)\n", rc);
+ goto pci_fini;
+ }
}
return 0;
@@ -3718,7 +3725,7 @@ static int gaudi_mmu_init(struct hl_device *hdev)
if (rc) {
dev_err(hdev->dev,
"failed to set hop0 addr for asid %d\n", i);
- goto err;
+ return rc;
}
}
@@ -3729,7 +3736,9 @@ static int gaudi_mmu_init(struct hl_device *hdev)
/* mem cache invalidation */
WREG32(mmSTLB_MEM_CACHE_INVALIDATION, 1);
- hl_mmu_invalidate_cache(hdev, true, 0);
+ rc = hl_mmu_invalidate_cache(hdev, true, 0);
+ if (rc)
+ return rc;
WREG32(mmMMU_UP_MMU_ENABLE, 1);
WREG32(mmMMU_UP_SPI_MASK, 0xF);
@@ -3745,9 +3754,6 @@ static int gaudi_mmu_init(struct hl_device *hdev)
gaudi->hw_cap_initialized |= HW_CAP_MMU;
return 0;
-
-err:
- return rc;
}
static int gaudi_load_firmware_to_device(struct hl_device *hdev)
@@ -4068,7 +4074,7 @@ disable_queues:
return rc;
}
-static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
+static int gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
struct cpu_dyn_regs *dyn_regs =
&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
@@ -4078,7 +4084,7 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset
if (!hard_reset) {
dev_err(hdev->dev, "GAUDI doesn't support soft-reset\n");
- return;
+ return 0;
}
if (hdev->pldm) {
@@ -4199,10 +4205,10 @@ skip_reset:
msleep(reset_timeout_ms);
status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
- if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
- dev_err(hdev->dev,
- "Timeout while waiting for device to reset 0x%x\n",
- status);
+ if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK) {
+ dev_err(hdev->dev, "Timeout while waiting for device to reset 0x%x\n", status);
+ return -ETIMEDOUT;
+ }
if (gaudi) {
gaudi->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q | HW_CAP_HBM |
@@ -4215,6 +4221,7 @@ skip_reset:
hdev->device_cpu_is_halted = false;
}
+ return 0;
}
static int gaudi_suspend(struct hl_device *hdev)
@@ -7297,7 +7304,7 @@ static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type, u64 *e
}
static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
- bool razwi, u64 *event_mask)
+ bool check_razwi, u64 *event_mask)
{
bool is_read = false, is_write = false;
u16 engine_id[2], num_of_razwi_eng = 0;
@@ -7316,7 +7323,7 @@ static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
event_type, desc);
- if (razwi) {
+ if (check_razwi) {
gaudi_print_and_get_razwi_info(hdev, &engine_id[0], &engine_id[1], &is_read,
&is_write);
gaudi_print_and_get_mmu_error_info(hdev, &razwi_addr, event_mask);
@@ -7333,8 +7340,9 @@ static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
num_of_razwi_eng = 1;
}
- hl_handle_razwi(hdev, razwi_addr, engine_id, num_of_razwi_eng, razwi_flags,
- event_mask);
+ if (razwi_flags)
+ hl_handle_razwi(hdev, razwi_addr, engine_id, num_of_razwi_eng,
+ razwi_flags, event_mask);
}
}
@@ -7633,6 +7641,7 @@ static void gaudi_print_clk_change_info(struct hl_device *hdev, u16 event_type,
static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
{
struct gaudi_device *gaudi = hdev->asic_specific;
+ struct hl_info_fw_err_info fw_err_info;
u64 data = le64_to_cpu(eq_entry->data[0]), event_mask = 0;
u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
u32 fw_fatal_err_flag = 0, flags = 0;
@@ -7911,7 +7920,10 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_FW_ALIVE_S:
gaudi_print_irq_info(hdev, event_type, false, &event_mask);
gaudi_print_fw_alive_info(hdev, &eq_entry->fw_alive);
- event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
+ fw_err_info.err_type = HL_INFO_FW_REPORTED_ERR;
+ fw_err_info.event_id = event_type;
+ fw_err_info.event_mask = &event_mask;
+ hl_handle_fw_err(hdev, &fw_err_info);
goto reset_device;
default:
@@ -7942,6 +7954,10 @@ reset_device:
}
if (reset_required) {
+ /* escalate general hw errors to critical/fatal error */
+ if (event_mask & HL_NOTIFIER_EVENT_GENERAL_HW_ERR)
+ hl_handle_critical_hw_err(hdev, event_type, &event_mask);
+
hl_device_cond_reset(hdev, flags, event_mask);
} else {
hl_fw_unmask_irq(hdev, event_type);
@@ -8403,19 +8419,26 @@ static int gaudi_internal_cb_pool_init(struct hl_device *hdev,
}
mutex_lock(&hdev->mmu_lock);
+
rc = hl_mmu_map_contiguous(ctx, hdev->internal_cb_va_base,
hdev->internal_cb_pool_dma_addr,
HOST_SPACE_INTERNAL_CB_SZ);
-
- hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR);
- mutex_unlock(&hdev->mmu_lock);
-
if (rc)
goto unreserve_internal_cb_pool;
+ rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR);
+ if (rc)
+ goto unmap_internal_cb_pool;
+
+ mutex_unlock(&hdev->mmu_lock);
+
return 0;
+unmap_internal_cb_pool:
+ hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base,
+ HOST_SPACE_INTERNAL_CB_SZ);
unreserve_internal_cb_pool:
+ mutex_unlock(&hdev->mmu_lock);
hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base,
HOST_SPACE_INTERNAL_CB_SZ);
destroy_internal_cb_pool:
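The gaudi_internal_cb_pool_init() hunk above keeps the MMU lock held across both the mapping and the cache invalidation, and unwinds each completed step in reverse order on failure. A hedged standalone sketch of that goto cleanup ladder follows; the helper names are stand-ins, and invalidate() is forced to fail to exercise the error path.

/*
 * Hedged sketch of the cleanup ordering: unmap while still under the
 * lock, then unlock, then release the reserved range.
 */
#include <stdio.h>

static int do_map(void)        { return 0; }
static void undo_map(void)     { puts("unmap"); }
static int invalidate(void)    { return -1; }	/* force the error path */
static void lock_mmu(void)     { puts("lock"); }
static void unlock_mmu(void)   { puts("unlock"); }
static void unreserve_va(void) { puts("unreserve"); }

static int init_pool(void)
{
	int rc;

	lock_mmu();

	rc = do_map();
	if (rc)
		goto unreserve;

	rc = invalidate();
	if (rc)
		goto unmap;

	unlock_mmu();
	return 0;

unmap:
	undo_map();		/* still under the lock, like the driver */
unreserve:
	unlock_mmu();
	unreserve_va();
	return rc;
}

int main(void)
{
	return init_pool() ? 1 : 0;
}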
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c
index 6f415fa94eee..edcbda3d9b40 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c
@@ -23,7 +23,8 @@
#define GAUDI2_DMA_POOL_BLK_SIZE SZ_256 /* 256 bytes */
#define GAUDI2_RESET_TIMEOUT_MSEC 2000 /* 2000ms */
-#define GAUDI2_RESET_POLL_TIMEOUT_USEC 50000 /* 50ms */
+
+#define GAUDI2_RESET_POLL_TIMEOUT_USEC 500000 /* 500ms */
#define GAUDI2_PLDM_HRESET_TIMEOUT_MSEC 25000 /* 25s */
#define GAUDI2_PLDM_SRESET_TIMEOUT_MSEC 25000 /* 25s */
#define GAUDI2_PLDM_RESET_POLL_TIMEOUT_USEC 3000000 /* 3s */
@@ -86,10 +87,11 @@
#define KDMA_TIMEOUT_USEC USEC_PER_SEC
-#define IS_DMA_IDLE(dma_core_idle_ind_mask) \
- (!((dma_core_idle_ind_mask) & \
- ((DCORE0_EDMA0_CORE_IDLE_IND_MASK_DESC_CNT_STS_MASK) | \
- (DCORE0_EDMA0_CORE_IDLE_IND_MASK_COMP_MASK))))
+#define IS_DMA_IDLE(dma_core_sts0) \
+ (!((dma_core_sts0) & (DCORE0_EDMA0_CORE_STS0_BUSY_MASK)))
+
+#define IS_DMA_HALTED(dma_core_sts1) \
+ ((dma_core_sts1) & (DCORE0_EDMA0_CORE_STS1_IS_HALT_MASK))
#define IS_MME_IDLE(mme_arch_sts) (((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK)
@@ -132,6 +134,282 @@
#define ENGINE_ID_DCORE_OFFSET (GAUDI2_DCORE1_ENGINE_ID_EDMA_0 - GAUDI2_DCORE0_ENGINE_ID_EDMA_0)
+/* RAZWI initiator coordinates */
+#define RAZWI_GET_AXUSER_XY(x) \
+ ((x & 0xF8001FF0) >> 4)
+
+#define RAZWI_GET_AXUSER_LOW_XY(x) \
+ ((x & 0x00001FF0) >> 4)
+
+#define RAZWI_INITIATOR_AXUER_L_X_SHIFT 0
+#define RAZWI_INITIATOR_AXUER_L_X_MASK 0x1F
+#define RAZWI_INITIATOR_AXUER_L_Y_SHIFT 5
+#define RAZWI_INITIATOR_AXUER_L_Y_MASK 0xF
+
+#define RAZWI_INITIATOR_AXUER_H_X_SHIFT 23
+#define RAZWI_INITIATOR_AXUER_H_X_MASK 0x1F
+
+#define RAZWI_INITIATOR_ID_X_Y_LOW(x, y) \
+ ((((y) & RAZWI_INITIATOR_AXUER_L_Y_MASK) << RAZWI_INITIATOR_AXUER_L_Y_SHIFT) | \
+ (((x) & RAZWI_INITIATOR_AXUER_L_X_MASK) << RAZWI_INITIATOR_AXUER_L_X_SHIFT))
+
+#define RAZWI_INITIATOR_ID_X_HIGH(x) \
+ (((x) & RAZWI_INITIATOR_AXUER_H_X_MASK) << RAZWI_INITIATOR_AXUER_H_X_SHIFT)
+
+#define RAZWI_INITIATOR_ID_X_Y(xl, yl, xh) \
+ (RAZWI_INITIATOR_ID_X_Y_LOW(xl, yl) | RAZWI_INITIATOR_ID_X_HIGH(xh))
+
+#define PSOC_RAZWI_ENG_STR_SIZE 128
+#define PSOC_RAZWI_MAX_ENG_PER_RTR 5
+
+struct gaudi2_razwi_info {
+ u32 axuser_xy;
+ u32 rtr_ctrl;
+ u16 eng_id;
+ char *eng_name;
+};
+
+static struct gaudi2_razwi_info common_razwi_info[] = {
+ {RAZWI_INITIATOR_ID_X_Y(2, 4, 0), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_DEC_0, "DEC0"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 4, 4), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_DEC_1, "DEC1"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 4, 18), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_DEC_0, "DEC2"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 4, 14), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_DEC_1, "DEC3"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 11, 0), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_DEC_0, "DEC4"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 11, 4), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_DEC_1, "DEC5"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 11, 18), mmDCORE3_RTR7_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_DEC_0, "DEC6"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 11, 14), mmDCORE3_RTR7_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_DEC_1, "DEC7"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 4, 6), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_PCIE_ENGINE_ID_DEC_0, "DEC8"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 4, 7), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_PCIE_ENGINE_ID_DEC_0, "DEC9"},
+ {RAZWI_INITIATOR_ID_X_Y(3, 4, 2), mmDCORE0_RTR1_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_0, "TPC0"},
+ {RAZWI_INITIATOR_ID_X_Y(3, 4, 4), mmDCORE0_RTR1_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_1, "TPC1"},
+ {RAZWI_INITIATOR_ID_X_Y(4, 4, 2), mmDCORE0_RTR2_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_2, "TPC2"},
+ {RAZWI_INITIATOR_ID_X_Y(4, 4, 4), mmDCORE0_RTR2_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_3, "TPC3"},
+ {RAZWI_INITIATOR_ID_X_Y(5, 4, 2), mmDCORE0_RTR3_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_4, "TPC4"},
+ {RAZWI_INITIATOR_ID_X_Y(5, 4, 4), mmDCORE0_RTR3_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_TPC_5, "TPC5"},
+ {RAZWI_INITIATOR_ID_X_Y(16, 4, 14), mmDCORE1_RTR6_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_0, "TPC6"},
+ {RAZWI_INITIATOR_ID_X_Y(16, 4, 16), mmDCORE1_RTR6_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_1, "TPC7"},
+ {RAZWI_INITIATOR_ID_X_Y(15, 4, 14), mmDCORE1_RTR5_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_2, "TPC8"},
+ {RAZWI_INITIATOR_ID_X_Y(15, 4, 16), mmDCORE1_RTR5_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_3, "TPC9"},
+ {RAZWI_INITIATOR_ID_X_Y(14, 4, 14), mmDCORE1_RTR4_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_4, "TPC10"},
+ {RAZWI_INITIATOR_ID_X_Y(14, 4, 16), mmDCORE1_RTR4_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_TPC_5, "TPC11"},
+ {RAZWI_INITIATOR_ID_X_Y(5, 11, 2), mmDCORE2_RTR3_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_0, "TPC12"},
+ {RAZWI_INITIATOR_ID_X_Y(5, 11, 4), mmDCORE2_RTR3_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_1, "TPC13"},
+ {RAZWI_INITIATOR_ID_X_Y(4, 11, 2), mmDCORE2_RTR2_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_2, "TPC14"},
+ {RAZWI_INITIATOR_ID_X_Y(4, 11, 4), mmDCORE2_RTR2_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_3, "TPC15"},
+ {RAZWI_INITIATOR_ID_X_Y(3, 11, 2), mmDCORE2_RTR1_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_4, "TPC16"},
+ {RAZWI_INITIATOR_ID_X_Y(3, 11, 4), mmDCORE2_RTR1_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_TPC_5, "TPC17"},
+ {RAZWI_INITIATOR_ID_X_Y(14, 11, 14), mmDCORE3_RTR4_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_0, "TPC18"},
+ {RAZWI_INITIATOR_ID_X_Y(14, 11, 16), mmDCORE3_RTR4_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_1, "TPC19"},
+ {RAZWI_INITIATOR_ID_X_Y(15, 11, 14), mmDCORE3_RTR5_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_2, "TPC20"},
+ {RAZWI_INITIATOR_ID_X_Y(15, 11, 16), mmDCORE3_RTR5_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_3, "TPC21"},
+ {RAZWI_INITIATOR_ID_X_Y(16, 11, 14), mmDCORE3_RTR6_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_4, "TPC22"},
+ {RAZWI_INITIATOR_ID_X_Y(16, 11, 16), mmDCORE3_RTR6_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_5, "TPC23"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 4, 2), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_TPC_5, "TPC24"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 4, 8), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC0_0, "NIC0"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 4, 10), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC0_1, "NIC1"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 4, 12), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC1_0, "NIC2"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 4, 14), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC1_1, "NIC3"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 4, 15), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC2_0, "NIC4"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 11, 2), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC2_1, "NIC5"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 11, 4), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC3_0, "NIC6"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 11, 6), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC3_1, "NIC7"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 11, 8), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC4_0, "NIC8"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 11, 12), mmDCORE3_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC4_1, "NIC9"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 11, 14), mmDCORE3_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC5_0, "NIC10"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 11, 16), mmDCORE3_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_NIC5_1, "NIC11"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 4, 2), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_PDMA_0, "PDMA0"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 4, 3), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_PDMA_1, "PDMA1"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 4, 4), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "PMMU"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 4, 5), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "PCIE"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 4, 16), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_ARC_FARM, "ARC_FARM"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 4, 17), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_KDMA, "KDMA"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 5, 1), mmSFT0_HBW_RTR_IF1_RTR_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_EDMA_0, "EDMA0"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 5, 1), mmSFT0_HBW_RTR_IF0_RTR_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_EDMA_1, "EDMA1"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 5, 18), mmSFT1_HBW_RTR_IF1_RTR_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_EDMA_0, "EDMA2"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 5, 18), mmSFT1_HBW_RTR_IF0_RTR_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_EDMA_1, "EDMA3"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 10, 1), mmSFT2_HBW_RTR_IF0_RTR_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_EDMA_0, "EDMA4"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 10, 1), mmSFT2_HBW_RTR_IF1_RTR_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_EDMA_1, "EDMA5"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 10, 18), mmSFT2_HBW_RTR_IF0_RTR_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_EDMA_0, "EDMA6"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 10, 18), mmSFT2_HBW_RTR_IF1_RTR_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_EDMA_1, "EDMA7"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 5, 0), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU0"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 5, 19), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU1"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 5, 0), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU2"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 5, 19), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU3"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 5, 0), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU4"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 5, 19), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU5"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 5, 0), mmDCORE0_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU6"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 5, 19), mmDCORE1_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU7"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 10, 0), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU8"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 10, 19), mmDCORE3_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU9"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 10, 0), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU10"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 10, 19), mmDCORE3_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU11"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 10, 0), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU12"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 10, 19), mmDCORE3_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU13"},
+ {RAZWI_INITIATOR_ID_X_Y(1, 10, 0), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU14"},
+ {RAZWI_INITIATOR_ID_X_Y(18, 10, 19), mmDCORE3_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_SIZE, "HMMU15"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 11, 2), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_ROT_0, "ROT0"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 11, 16), mmDCORE3_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_ROT_1, "ROT1"},
+ {RAZWI_INITIATOR_ID_X_Y(2, 11, 2), mmDCORE2_RTR0_CTRL_BASE,
+ GAUDI2_ENGINE_ID_PSOC, "CPU"},
+ {RAZWI_INITIATOR_ID_X_Y(17, 11, 11), mmDCORE3_RTR7_CTRL_BASE,
+ GAUDI2_ENGINE_ID_PSOC, "PSOC"}
+};
+
+static struct gaudi2_razwi_info mme_razwi_info[] = {
+ /* MME X high coordinate is N/A, hence using only low coordinates */
+ {RAZWI_INITIATOR_ID_X_Y_LOW(7, 4), mmDCORE0_RTR5_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_WAP0"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(9, 4), mmDCORE0_RTR7_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_WAP1"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(8, 4), mmDCORE0_RTR6_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_CTRL_WR"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(9, 4), mmDCORE0_RTR7_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_CTRL_RD"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(6, 4), mmDCORE0_RTR4_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_SBTE0"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(6, 4), mmDCORE0_RTR4_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_SBTE1"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(7, 4), mmDCORE0_RTR5_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_SBTE2"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(8, 4), mmDCORE0_RTR6_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_SBTE3"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(9, 4), mmDCORE0_RTR7_CTRL_BASE,
+ GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_SBTE4"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(12, 4), mmDCORE1_RTR2_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_WAP0"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(10, 4), mmDCORE1_RTR0_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_WAP1"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(11, 4), mmDCORE1_RTR1_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_CTRL_WR"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(10, 4), mmDCORE1_RTR0_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_CTRL_RD"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(13, 4), mmDCORE1_RTR3_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_SBTE0"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(13, 4), mmDCORE1_RTR3_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_SBTE1"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(12, 4), mmDCORE1_RTR2_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_SBTE2"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(11, 4), mmDCORE1_RTR1_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_SBTE3"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(10, 4), mmDCORE1_RTR0_CTRL_BASE,
+ GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_SBTE4"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(7, 11), mmDCORE2_RTR5_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_WAP0"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(9, 11), mmDCORE2_RTR7_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_WAP1"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(8, 11), mmDCORE2_RTR6_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_CTRL_WR"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(9, 11), mmDCORE2_RTR7_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_CTRL_RD"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(6, 11), mmDCORE2_RTR4_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_SBTE0"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(6, 11), mmDCORE2_RTR4_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_SBTE1"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(7, 11), mmDCORE2_RTR5_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_SBTE2"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(8, 11), mmDCORE2_RTR6_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_SBTE3"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(9, 11), mmDCORE2_RTR7_CTRL_BASE,
+ GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_SBTE4"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(12, 11), mmDCORE3_RTR2_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_WAP0"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(10, 11), mmDCORE3_RTR0_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_WAP1"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(11, 11), mmDCORE3_RTR1_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_CTRL_WR"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(10, 11), mmDCORE3_RTR0_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_CTRL_RD"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(13, 11), mmDCORE3_RTR3_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_SBTE0"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(13, 11), mmDCORE3_RTR3_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_SBTE1"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(12, 11), mmDCORE3_RTR2_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_SBTE2"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(11, 11), mmDCORE3_RTR1_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_SBTE3"},
+ {RAZWI_INITIATOR_ID_X_Y_LOW(10, 11), mmDCORE3_RTR0_CTRL_BASE,
+ GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_SBTE4"}
+};
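The macros above pack a RAZWI initiator's low X/Y and high X coordinates into a single identifier that the tables key on. Below is a hedged standalone sketch that reuses the same shift/mask values to encode DEC0's (2, 4, 0) coordinates and decode them back; how the driver matches these ids against the AXUSER registers is not shown here.

/*
 * Hedged sketch: round-tripping the RAZWI initiator coordinate encoding.
 * The shift/mask values are copied from the hunk above.
 */
#include <stdio.h>

#define RAZWI_INITIATOR_AXUER_L_X_SHIFT		0
#define RAZWI_INITIATOR_AXUER_L_X_MASK		0x1F
#define RAZWI_INITIATOR_AXUER_L_Y_SHIFT		5
#define RAZWI_INITIATOR_AXUER_L_Y_MASK		0xF
#define RAZWI_INITIATOR_AXUER_H_X_SHIFT		23
#define RAZWI_INITIATOR_AXUER_H_X_MASK		0x1F

#define RAZWI_INITIATOR_ID_X_Y_LOW(x, y) \
	((((y) & RAZWI_INITIATOR_AXUER_L_Y_MASK) << RAZWI_INITIATOR_AXUER_L_Y_SHIFT) | \
	 (((x) & RAZWI_INITIATOR_AXUER_L_X_MASK) << RAZWI_INITIATOR_AXUER_L_X_SHIFT))

#define RAZWI_INITIATOR_ID_X_HIGH(x) \
	(((x) & RAZWI_INITIATOR_AXUER_H_X_MASK) << RAZWI_INITIATOR_AXUER_H_X_SHIFT)

#define RAZWI_INITIATOR_ID_X_Y(xl, yl, xh) \
	(RAZWI_INITIATOR_ID_X_Y_LOW(xl, yl) | RAZWI_INITIATOR_ID_X_HIGH(xh))

int main(void)
{
	/* DEC0's coordinates from the common_razwi_info table: (2, 4, 0) */
	unsigned int id = RAZWI_INITIATOR_ID_X_Y(2, 4, 0);

	unsigned int low_x  = (id >> RAZWI_INITIATOR_AXUER_L_X_SHIFT) & RAZWI_INITIATOR_AXUER_L_X_MASK;
	unsigned int low_y  = (id >> RAZWI_INITIATOR_AXUER_L_Y_SHIFT) & RAZWI_INITIATOR_AXUER_L_Y_MASK;
	unsigned int high_x = (id >> RAZWI_INITIATOR_AXUER_H_X_SHIFT) & RAZWI_INITIATOR_AXUER_H_X_MASK;

	printf("id=%#x -> low x=%u, low y=%u, high x=%u\n", id, low_x, low_y, high_x);
	return 0;
}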
+
enum hl_pmmu_fatal_cause {
LATENCY_RD_OUT_FIFO_OVERRUN,
LATENCY_WR_OUT_FIFO_OVERRUN,
@@ -1437,6 +1715,34 @@ static const u32 gaudi2_tpc_cfg_blocks_bases[TPC_ID_SIZE] = {
[TPC_ID_DCORE0_TPC6] = mmDCORE0_TPC6_CFG_BASE,
};
+static const u32 gaudi2_tpc_eml_cfg_blocks_bases[TPC_ID_SIZE] = {
+ [TPC_ID_DCORE0_TPC0] = mmDCORE0_TPC0_EML_CFG_BASE,
+ [TPC_ID_DCORE0_TPC1] = mmDCORE0_TPC1_EML_CFG_BASE,
+ [TPC_ID_DCORE0_TPC2] = mmDCORE0_TPC2_EML_CFG_BASE,
+ [TPC_ID_DCORE0_TPC3] = mmDCORE0_TPC3_EML_CFG_BASE,
+ [TPC_ID_DCORE0_TPC4] = mmDCORE0_TPC4_EML_CFG_BASE,
+ [TPC_ID_DCORE0_TPC5] = mmDCORE0_TPC5_EML_CFG_BASE,
+ [TPC_ID_DCORE1_TPC0] = mmDCORE1_TPC0_EML_CFG_BASE,
+ [TPC_ID_DCORE1_TPC1] = mmDCORE1_TPC1_EML_CFG_BASE,
+ [TPC_ID_DCORE1_TPC2] = mmDCORE1_TPC2_EML_CFG_BASE,
+ [TPC_ID_DCORE1_TPC3] = mmDCORE1_TPC3_EML_CFG_BASE,
+ [TPC_ID_DCORE1_TPC4] = mmDCORE1_TPC4_EML_CFG_BASE,
+ [TPC_ID_DCORE1_TPC5] = mmDCORE1_TPC5_EML_CFG_BASE,
+ [TPC_ID_DCORE2_TPC0] = mmDCORE2_TPC0_EML_CFG_BASE,
+ [TPC_ID_DCORE2_TPC1] = mmDCORE2_TPC1_EML_CFG_BASE,
+ [TPC_ID_DCORE2_TPC2] = mmDCORE2_TPC2_EML_CFG_BASE,
+ [TPC_ID_DCORE2_TPC3] = mmDCORE2_TPC3_EML_CFG_BASE,
+ [TPC_ID_DCORE2_TPC4] = mmDCORE2_TPC4_EML_CFG_BASE,
+ [TPC_ID_DCORE2_TPC5] = mmDCORE2_TPC5_EML_CFG_BASE,
+ [TPC_ID_DCORE3_TPC0] = mmDCORE3_TPC0_EML_CFG_BASE,
+ [TPC_ID_DCORE3_TPC1] = mmDCORE3_TPC1_EML_CFG_BASE,
+ [TPC_ID_DCORE3_TPC2] = mmDCORE3_TPC2_EML_CFG_BASE,
+ [TPC_ID_DCORE3_TPC3] = mmDCORE3_TPC3_EML_CFG_BASE,
+ [TPC_ID_DCORE3_TPC4] = mmDCORE3_TPC4_EML_CFG_BASE,
+ [TPC_ID_DCORE3_TPC5] = mmDCORE3_TPC5_EML_CFG_BASE,
+ [TPC_ID_DCORE0_TPC6] = mmDCORE0_TPC6_EML_CFG_BASE,
+};
+
const u32 gaudi2_rot_blocks_bases[ROTATOR_ID_SIZE] = {
[ROTATOR_ID_0] = mmROT0_BASE,
[ROTATOR_ID_1] = mmROT1_BASE
@@ -1475,6 +1781,56 @@ static const u32 gaudi2_rot_id_to_queue_id[ROTATOR_ID_SIZE] = {
[ROTATOR_ID_1] = GAUDI2_QUEUE_ID_ROT_1_0,
};
+static const u32 gaudi2_tpc_engine_id_to_tpc_id[] = {
+ [GAUDI2_DCORE0_ENGINE_ID_TPC_0] = TPC_ID_DCORE0_TPC0,
+ [GAUDI2_DCORE0_ENGINE_ID_TPC_1] = TPC_ID_DCORE0_TPC1,
+ [GAUDI2_DCORE0_ENGINE_ID_TPC_2] = TPC_ID_DCORE0_TPC2,
+ [GAUDI2_DCORE0_ENGINE_ID_TPC_3] = TPC_ID_DCORE0_TPC3,
+ [GAUDI2_DCORE0_ENGINE_ID_TPC_4] = TPC_ID_DCORE0_TPC4,
+ [GAUDI2_DCORE0_ENGINE_ID_TPC_5] = TPC_ID_DCORE0_TPC5,
+ [GAUDI2_DCORE1_ENGINE_ID_TPC_0] = TPC_ID_DCORE1_TPC0,
+ [GAUDI2_DCORE1_ENGINE_ID_TPC_1] = TPC_ID_DCORE1_TPC1,
+ [GAUDI2_DCORE1_ENGINE_ID_TPC_2] = TPC_ID_DCORE1_TPC2,
+ [GAUDI2_DCORE1_ENGINE_ID_TPC_3] = TPC_ID_DCORE1_TPC3,
+ [GAUDI2_DCORE1_ENGINE_ID_TPC_4] = TPC_ID_DCORE1_TPC4,
+ [GAUDI2_DCORE1_ENGINE_ID_TPC_5] = TPC_ID_DCORE1_TPC5,
+ [GAUDI2_DCORE2_ENGINE_ID_TPC_0] = TPC_ID_DCORE2_TPC0,
+ [GAUDI2_DCORE2_ENGINE_ID_TPC_1] = TPC_ID_DCORE2_TPC1,
+ [GAUDI2_DCORE2_ENGINE_ID_TPC_2] = TPC_ID_DCORE2_TPC2,
+ [GAUDI2_DCORE2_ENGINE_ID_TPC_3] = TPC_ID_DCORE2_TPC3,
+ [GAUDI2_DCORE2_ENGINE_ID_TPC_4] = TPC_ID_DCORE2_TPC4,
+ [GAUDI2_DCORE2_ENGINE_ID_TPC_5] = TPC_ID_DCORE2_TPC5,
+ [GAUDI2_DCORE3_ENGINE_ID_TPC_0] = TPC_ID_DCORE3_TPC0,
+ [GAUDI2_DCORE3_ENGINE_ID_TPC_1] = TPC_ID_DCORE3_TPC1,
+ [GAUDI2_DCORE3_ENGINE_ID_TPC_2] = TPC_ID_DCORE3_TPC2,
+ [GAUDI2_DCORE3_ENGINE_ID_TPC_3] = TPC_ID_DCORE3_TPC3,
+ [GAUDI2_DCORE3_ENGINE_ID_TPC_4] = TPC_ID_DCORE3_TPC4,
+ [GAUDI2_DCORE3_ENGINE_ID_TPC_5] = TPC_ID_DCORE3_TPC5,
+ /* the PCI TPC is placed last (mapped like HW) */
+ [GAUDI2_DCORE0_ENGINE_ID_TPC_6] = TPC_ID_DCORE0_TPC6,
+};
+
+static const u32 gaudi2_mme_engine_id_to_mme_id[] = {
+ [GAUDI2_DCORE0_ENGINE_ID_MME] = MME_ID_DCORE0,
+ [GAUDI2_DCORE1_ENGINE_ID_MME] = MME_ID_DCORE1,
+ [GAUDI2_DCORE2_ENGINE_ID_MME] = MME_ID_DCORE2,
+ [GAUDI2_DCORE3_ENGINE_ID_MME] = MME_ID_DCORE3,
+};
+
+static const u32 gaudi2_edma_engine_id_to_edma_id[] = {
+ [GAUDI2_ENGINE_ID_PDMA_0] = DMA_CORE_ID_PDMA0,
+ [GAUDI2_ENGINE_ID_PDMA_1] = DMA_CORE_ID_PDMA1,
+ [GAUDI2_DCORE0_ENGINE_ID_EDMA_0] = DMA_CORE_ID_EDMA0,
+ [GAUDI2_DCORE0_ENGINE_ID_EDMA_1] = DMA_CORE_ID_EDMA1,
+ [GAUDI2_DCORE1_ENGINE_ID_EDMA_0] = DMA_CORE_ID_EDMA2,
+ [GAUDI2_DCORE1_ENGINE_ID_EDMA_1] = DMA_CORE_ID_EDMA3,
+ [GAUDI2_DCORE2_ENGINE_ID_EDMA_0] = DMA_CORE_ID_EDMA4,
+ [GAUDI2_DCORE2_ENGINE_ID_EDMA_1] = DMA_CORE_ID_EDMA5,
+ [GAUDI2_DCORE3_ENGINE_ID_EDMA_0] = DMA_CORE_ID_EDMA6,
+ [GAUDI2_DCORE3_ENGINE_ID_EDMA_1] = DMA_CORE_ID_EDMA7,
+ [GAUDI2_ENGINE_ID_KDMA] = DMA_CORE_ID_KDMA,
+};
+
const u32 edma_stream_base[NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES] = {
GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0,
GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0,
@@ -1499,41 +1855,6 @@ static const char gaudi2_vdec_irq_name[GAUDI2_VDEC_MSIX_ENTRIES][GAUDI2_MAX_STRI
"gaudi2 vdec s_1", "gaudi2 vdec s_1 abnormal"
};
-static const u32 rtr_coordinates_to_rtr_id[NUM_OF_RTR_PER_DCORE * NUM_OF_DCORES] = {
- RTR_ID_X_Y(2, 4),
- RTR_ID_X_Y(3, 4),
- RTR_ID_X_Y(4, 4),
- RTR_ID_X_Y(5, 4),
- RTR_ID_X_Y(6, 4),
- RTR_ID_X_Y(7, 4),
- RTR_ID_X_Y(8, 4),
- RTR_ID_X_Y(9, 4),
- RTR_ID_X_Y(10, 4),
- RTR_ID_X_Y(11, 4),
- RTR_ID_X_Y(12, 4),
- RTR_ID_X_Y(13, 4),
- RTR_ID_X_Y(14, 4),
- RTR_ID_X_Y(15, 4),
- RTR_ID_X_Y(16, 4),
- RTR_ID_X_Y(17, 4),
- RTR_ID_X_Y(2, 11),
- RTR_ID_X_Y(3, 11),
- RTR_ID_X_Y(4, 11),
- RTR_ID_X_Y(5, 11),
- RTR_ID_X_Y(6, 11),
- RTR_ID_X_Y(7, 11),
- RTR_ID_X_Y(8, 11),
- RTR_ID_X_Y(9, 11),
- RTR_ID_X_Y(0, 0),/* 24 no id */
- RTR_ID_X_Y(0, 0),/* 25 no id */
- RTR_ID_X_Y(0, 0),/* 26 no id */
- RTR_ID_X_Y(0, 0),/* 27 no id */
- RTR_ID_X_Y(14, 11),
- RTR_ID_X_Y(15, 11),
- RTR_ID_X_Y(16, 11),
- RTR_ID_X_Y(17, 11)
-};
-
enum rtr_id {
DCORE0_RTR0,
DCORE0_RTR1,
@@ -1784,6 +2105,12 @@ static void gaudi2_set_arc_id_cap(struct hl_device *hdev, u64 arc_id);
static void gaudi2_memset_device_lbw(struct hl_device *hdev, u32 addr, u32 size, u32 val);
static int gaudi2_send_job_to_kdma(struct hl_device *hdev, u64 src_addr, u64 dst_addr, u32 size,
bool is_memset);
+static bool gaudi2_get_tpc_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e);
+static bool gaudi2_get_mme_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e);
+static bool gaudi2_get_edma_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e);
static u64 gaudi2_mmu_scramble_addr(struct hl_device *hdev, u64 raw_addr);
static void gaudi2_init_scrambler_hbm(struct hl_device *hdev)
@@ -1988,6 +2315,8 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->hints_range_reservation = true;
+ prop->rotator_enabled_mask = BIT(NUM_OF_ROT) - 1;
+
if (hdev->pldm)
prop->mmu_pgt_size = 0x800000; /* 8MB */
else
@@ -2011,7 +2340,6 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->dmmu.num_hops = MMU_ARCH_6_HOPS;
prop->dmmu.last_mask = LAST_MASK;
prop->dmmu.host_resident = 1;
- /* TODO: will be duplicated until implementing per-MMU props */
prop->dmmu.hop_table_size = prop->mmu_hop_table_size;
prop->dmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
@@ -2027,7 +2355,6 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->pmmu.host_resident = 1;
prop->pmmu.num_hops = MMU_ARCH_6_HOPS;
prop->pmmu.last_mask = LAST_MASK;
- /* TODO: will be duplicated until implementing per-MMU props */
prop->pmmu.hop_table_size = prop->mmu_hop_table_size;
prop->pmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
@@ -2084,11 +2411,14 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->pmmu_huge.end_addr = VA_HOST_SPACE_HPAGE_END;
}
+ prop->max_num_of_engines = GAUDI2_ENGINE_ID_SIZE;
prop->num_engine_cores = CPU_ID_MAX;
prop->cfg_size = CFG_SIZE;
prop->max_asid = MAX_ASID;
prop->num_of_events = GAUDI2_EVENT_SIZE;
+ prop->supports_engine_modes = true;
+
prop->dc_power_default = DC_POWER_DEFAULT;
prop->cb_pool_cb_cnt = GAUDI2_CB_POOL_CB_CNT;
@@ -2107,6 +2437,8 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
(num_sync_stream_queues * HL_RSVD_MONS);
prop->first_available_user_interrupt = GAUDI2_IRQ_NUM_USER_FIRST;
+ prop->tpc_interrupt_id = GAUDI2_IRQ_NUM_TPC_ASSERT;
+ prop->unexpected_user_error_interrupt_id = GAUDI2_IRQ_NUM_UNEXPECTED_ERROR;
prop->first_available_cq[0] = GAUDI2_RESERVED_CQ_NUMBER;
@@ -2644,13 +2976,18 @@ static int gaudi2_early_init(struct hl_device *hdev)
rc = hl_fw_read_preboot_status(hdev);
if (rc) {
if (hdev->reset_on_preboot_fail)
+ /* we are already on the failure path, so don't check if hw_fini fails. */
hdev->asic_funcs->hw_fini(hdev, true, false);
goto pci_fini;
}
if (gaudi2_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
dev_dbg(hdev->dev, "H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true, false);
+ rc = hdev->asic_funcs->hw_fini(hdev, true, false);
+ if (rc) {
+ dev_err(hdev->dev, "failed to reset HW in dirty state (%d)\n", rc);
+ goto pci_fini;
+ }
}
return 0;
@@ -2692,6 +3029,7 @@ static bool gaudi2_is_arc_tpc_owned(u64 arc_id)
static void gaudi2_init_arcs(struct hl_device *hdev)
{
+ struct cpu_dyn_regs *dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u64 arc_id;
u32 i;
@@ -2721,6 +3059,10 @@ static void gaudi2_init_arcs(struct hl_device *hdev)
gaudi2_set_arc_id_cap(hdev, arc_id);
}
+
+ /* Fetch ARC scratchpad address */
+ hdev->asic_prop.engine_core_interrupt_reg_addr =
+ CFG_BASE + le32_to_cpu(dyn_regs->eng_arc_irq_ctrl);
}
static int gaudi2_scrub_arc_dccm(struct hl_device *hdev, u32 cpu_id)
@@ -2772,16 +3114,21 @@ static int gaudi2_scrub_arc_dccm(struct hl_device *hdev, u32 cpu_id)
return 0;
}
-static void gaudi2_scrub_arcs_dccm(struct hl_device *hdev)
+static int gaudi2_scrub_arcs_dccm(struct hl_device *hdev)
{
u16 arc_id;
+ int rc;
for (arc_id = CPU_ID_SCHED_ARC0 ; arc_id < CPU_ID_MAX ; arc_id++) {
if (!gaudi2_is_arc_enabled(hdev, arc_id))
continue;
- gaudi2_scrub_arc_dccm(hdev, arc_id);
+ rc = gaudi2_scrub_arc_dccm(hdev, arc_id);
+ if (rc)
+ return rc;
}
+
+ return 0;
}
static int gaudi2_late_init(struct hl_device *hdev)
@@ -2805,7 +3152,13 @@ static int gaudi2_late_init(struct hl_device *hdev)
}
gaudi2_init_arcs(hdev);
- gaudi2_scrub_arcs_dccm(hdev);
+
+ rc = gaudi2_scrub_arcs_dccm(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to scrub arcs DCCM\n");
+ goto disable_pci_access;
+ }
+
gaudi2_init_security(hdev);
return 0;
@@ -2989,6 +3342,13 @@ static void gaudi2_user_interrupt_setup(struct hl_device *hdev)
struct asic_fixed_properties *prop = &hdev->asic_prop;
int i, j, k;
+ /* Initialize TPC interrupt */
+ HL_USR_INTR_STRUCT_INIT(hdev->tpc_interrupt, hdev, 0, HL_USR_INTERRUPT_TPC);
+
+ /* Initialize general purpose interrupt */
+ HL_USR_INTR_STRUCT_INIT(hdev->unexpected_error_interrupt, hdev, 0,
+ HL_USR_INTERRUPT_UNEXPECTED);
+
/* Initialize common user CQ interrupt */
HL_USR_INTR_STRUCT_INIT(hdev->common_user_cq_interrupt, hdev,
HL_COMMON_USER_CQ_INTERRUPT_ID, HL_USR_INTERRUPT_CQ);
@@ -3646,6 +4006,10 @@ static const char *gaudi2_irq_name(u16 irq_number)
return "gaudi2 completion";
case GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM ... GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM:
return gaudi2_vdec_irq_name[irq_number - GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM];
+ case GAUDI2_IRQ_NUM_TPC_ASSERT:
+ return "gaudi2 tpc assert";
+ case GAUDI2_IRQ_NUM_UNEXPECTED_ERROR:
+		return "gaudi2 unexpected error";
case GAUDI2_IRQ_NUM_USER_FIRST ... GAUDI2_IRQ_NUM_USER_LAST:
return "gaudi2 user completion";
default:
@@ -3677,7 +4041,6 @@ static void gaudi2_dec_disable_msix(struct hl_device *hdev, u32 max_irq_num)
static int gaudi2_dec_enable_msix(struct hl_device *hdev)
{
int rc, i, irq_init_cnt, irq, relative_idx;
- irq_handler_t irq_handler;
struct hl_dec *dec;
for (i = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM, irq_init_cnt = 0;
@@ -3687,20 +4050,24 @@ static int gaudi2_dec_enable_msix(struct hl_device *hdev)
irq = pci_irq_vector(hdev->pdev, i);
relative_idx = i - GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM;
- irq_handler = (relative_idx % 2) ?
- hl_irq_handler_dec_abnrm :
- hl_irq_handler_user_interrupt;
-
- dec = hdev->dec + relative_idx / 2;
-
/* We pass different structures depending on the irq handler. For the abnormal
* interrupt we pass hl_dec and for the regular interrupt we pass the relevant
* user_interrupt entry
+ *
+ * TODO: change the dec abnrm to threaded irq
*/
- rc = request_irq(irq, irq_handler, 0, gaudi2_irq_name(i),
- ((relative_idx % 2) ?
- (void *) dec :
- (void *) &hdev->user_interrupt[dec->core_id]));
+
+ dec = hdev->dec + relative_idx / 2;
+ if (relative_idx % 2) {
+ rc = request_irq(irq, hl_irq_handler_dec_abnrm, 0,
+ gaudi2_irq_name(i), (void *) dec);
+ } else {
+ rc = request_threaded_irq(irq, hl_irq_handler_user_interrupt,
+ hl_irq_user_interrupt_thread_handler, IRQF_ONESHOT,
+ gaudi2_irq_name(i),
+ (void *) &hdev->user_interrupt[dec->core_id]);
+ }
+
if (rc) {
dev_err(hdev->dev, "Failed to request IRQ %d", irq);
goto free_dec_irqs;
@@ -3719,7 +4086,6 @@ static int gaudi2_enable_msix(struct hl_device *hdev)
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi2_device *gaudi2 = hdev->asic_specific;
int rc, irq, i, j, user_irq_init_cnt;
- irq_handler_t irq_handler;
struct hl_cq *cq;
if (gaudi2->hw_cap_initialized & HW_CAP_MSIX)
@@ -3755,14 +4121,33 @@ static int gaudi2_enable_msix(struct hl_device *hdev)
goto free_event_irq;
}
+ irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_TPC_ASSERT);
+ rc = request_threaded_irq(irq, hl_irq_handler_user_interrupt,
+ hl_irq_user_interrupt_thread_handler, IRQF_ONESHOT,
+ gaudi2_irq_name(GAUDI2_IRQ_NUM_TPC_ASSERT), &hdev->tpc_interrupt);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to request IRQ %d", irq);
+ goto free_dec_irq;
+ }
+
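+	/* Note: unlike the TPC assert interrupt above, this one uses a plain (non-threaded) handler */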
+ irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_UNEXPECTED_ERROR);
+ rc = request_irq(irq, hl_irq_handler_user_interrupt, 0,
+ gaudi2_irq_name(GAUDI2_IRQ_NUM_UNEXPECTED_ERROR),
+ &hdev->unexpected_error_interrupt);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to request IRQ %d", irq);
+ goto free_tpc_irq;
+ }
+
for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = prop->user_dec_intr_count, user_irq_init_cnt = 0;
user_irq_init_cnt < prop->user_interrupt_count;
i++, j++, user_irq_init_cnt++) {
irq = pci_irq_vector(hdev->pdev, i);
- irq_handler = hl_irq_handler_user_interrupt;
+ rc = request_threaded_irq(irq, hl_irq_handler_user_interrupt,
+ hl_irq_user_interrupt_thread_handler, IRQF_ONESHOT,
+ gaudi2_irq_name(i), &hdev->user_interrupt[j]);
- rc = request_irq(irq, irq_handler, 0, gaudi2_irq_name(i), &hdev->user_interrupt[j]);
if (rc) {
dev_err(hdev->dev, "Failed to request IRQ %d", irq);
goto free_user_irq;
@@ -3780,9 +4165,13 @@ free_user_irq:
irq = pci_irq_vector(hdev->pdev, i);
free_irq(irq, &hdev->user_interrupt[j]);
}
-
- gaudi2_dec_disable_msix(hdev, GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM + 1);
-
+ irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_UNEXPECTED_ERROR);
+ free_irq(irq, &hdev->unexpected_error_interrupt);
+free_tpc_irq:
+ irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_TPC_ASSERT);
+ free_irq(irq, &hdev->tpc_interrupt);
+free_dec_irq:
+ gaudi2_dec_disable_msix(hdev, GAUDI2_IRQ_NUM_DEC_LAST + 1);
free_event_irq:
irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_EVENT_QUEUE);
free_irq(irq, cq);
@@ -3814,6 +4203,9 @@ static void gaudi2_sync_irqs(struct hl_device *hdev)
synchronize_irq(irq);
}
+ synchronize_irq(pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_TPC_ASSERT));
+ synchronize_irq(pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_UNEXPECTED_ERROR));
+
for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = 0 ; j < hdev->asic_prop.user_interrupt_count;
i++, j++) {
irq = pci_irq_vector(hdev->pdev, i);
@@ -3840,6 +4232,12 @@ static void gaudi2_disable_msix(struct hl_device *hdev)
gaudi2_dec_disable_msix(hdev, GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM + 1);
+ irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_TPC_ASSERT);
+ free_irq(irq, &hdev->tpc_interrupt);
+
+ irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_UNEXPECTED_ERROR);
+ free_irq(irq, &hdev->unexpected_error_interrupt);
+
for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = prop->user_dec_intr_count, k = 0;
k < hdev->asic_prop.user_interrupt_count ; i++, j++, k++) {
@@ -4037,7 +4435,6 @@ static int gaudi2_set_engine_cores(struct hl_device *hdev, u32 *core_ids,
{
int i, rc;
-
for (i = 0 ; i < num_cores ; i++) {
if (gaudi2_is_arc_enabled(hdev, core_ids[i]))
gaudi2_set_arc_running_mode(hdev, core_ids[i], core_command);
@@ -4059,6 +4456,139 @@ static int gaudi2_set_engine_cores(struct hl_device *hdev, u32 *core_ids,
return 0;
}
+static int gaudi2_set_tpc_engine_mode(struct hl_device *hdev, u32 engine_id, u32 engine_command)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 reg_base, reg_addr, reg_val, tpc_id;
+
+ if (!(gaudi2->tpc_hw_cap_initialized & HW_CAP_TPC_MASK))
+ return 0;
+
+ tpc_id = gaudi2_tpc_engine_id_to_tpc_id[engine_id];
+ if (!(gaudi2->tpc_hw_cap_initialized & BIT_ULL(HW_CAP_TPC_SHIFT + tpc_id)))
+ return 0;
+
+ reg_base = gaudi2_tpc_cfg_blocks_bases[tpc_id];
+ reg_addr = reg_base + TPC_CFG_STALL_OFFSET;
+ reg_val = FIELD_PREP(DCORE0_TPC0_CFG_TPC_STALL_V_MASK,
+ !!(engine_command == HL_ENGINE_STALL));
+ WREG32(reg_addr, reg_val);
+
+ if (engine_command == HL_ENGINE_RESUME) {
+ reg_base = gaudi2_tpc_eml_cfg_blocks_bases[tpc_id];
+ reg_addr = reg_base + TPC_EML_CFG_DBG_CNT_OFFSET;
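+		/* Assumption: setting the DBG_EXIT bit releases the TPC from its stalled/debug state */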
+ RMWREG32(reg_addr, 0x1, DCORE0_TPC0_EML_CFG_DBG_CNT_DBG_EXIT_MASK);
+ }
+
+ return 0;
+}
+
+static int gaudi2_set_mme_engine_mode(struct hl_device *hdev, u32 engine_id, u32 engine_command)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 reg_base, reg_addr, reg_val, mme_id;
+
+ mme_id = gaudi2_mme_engine_id_to_mme_id[engine_id];
+ if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_MME_SHIFT + mme_id)))
+ return 0;
+
+ reg_base = gaudi2_mme_ctrl_lo_blocks_bases[mme_id];
+ reg_addr = reg_base + MME_CTRL_LO_QM_STALL_OFFSET;
+ reg_val = FIELD_PREP(DCORE0_MME_CTRL_LO_QM_STALL_V_MASK,
+ !!(engine_command == HL_ENGINE_STALL));
+ WREG32(reg_addr, reg_val);
+
+ return 0;
+}
+
+static int gaudi2_set_edma_engine_mode(struct hl_device *hdev, u32 engine_id, u32 engine_command)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ u32 reg_base, reg_addr, reg_val, edma_id;
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_EDMA_MASK))
+ return 0;
+
+ edma_id = gaudi2_edma_engine_id_to_edma_id[engine_id];
+ if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_EDMA_SHIFT + edma_id)))
+ return 0;
+
+ reg_base = gaudi2_dma_core_blocks_bases[edma_id];
+ reg_addr = reg_base + EDMA_CORE_CFG_STALL_OFFSET;
+ reg_val = FIELD_PREP(DCORE0_EDMA0_CORE_CFG_1_HALT_MASK,
+ !!(engine_command == HL_ENGINE_STALL));
+ WREG32(reg_addr, reg_val);
+
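+	/* For a stall, FLUSH is asserted as well (presumably to drain in-flight transfers before halting) */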
+ if (engine_command == HL_ENGINE_STALL) {
+ reg_val = FIELD_PREP(DCORE0_EDMA0_CORE_CFG_1_HALT_MASK, 0x1) |
+ FIELD_PREP(DCORE0_EDMA0_CORE_CFG_1_FLUSH_MASK, 0x1);
+ WREG32(reg_addr, reg_val);
+ }
+
+ return 0;
+}
+
+static int gaudi2_set_engine_modes(struct hl_device *hdev,
+ u32 *engine_ids, u32 num_engines, u32 engine_command)
+{
+ int i, rc;
+
+ for (i = 0 ; i < num_engines ; ++i) {
+ switch (engine_ids[i]) {
+ case GAUDI2_DCORE0_ENGINE_ID_TPC_0 ... GAUDI2_DCORE0_ENGINE_ID_TPC_5:
+ case GAUDI2_DCORE1_ENGINE_ID_TPC_0 ... GAUDI2_DCORE1_ENGINE_ID_TPC_5:
+ case GAUDI2_DCORE2_ENGINE_ID_TPC_0 ... GAUDI2_DCORE2_ENGINE_ID_TPC_5:
+ case GAUDI2_DCORE3_ENGINE_ID_TPC_0 ... GAUDI2_DCORE3_ENGINE_ID_TPC_5:
+ rc = gaudi2_set_tpc_engine_mode(hdev, engine_ids[i], engine_command);
+ if (rc)
+ return rc;
+
+ break;
+ case GAUDI2_DCORE0_ENGINE_ID_MME:
+ case GAUDI2_DCORE1_ENGINE_ID_MME:
+ case GAUDI2_DCORE2_ENGINE_ID_MME:
+ case GAUDI2_DCORE3_ENGINE_ID_MME:
+ rc = gaudi2_set_mme_engine_mode(hdev, engine_ids[i], engine_command);
+ if (rc)
+ return rc;
+
+ break;
+ case GAUDI2_DCORE0_ENGINE_ID_EDMA_0 ... GAUDI2_DCORE0_ENGINE_ID_EDMA_1:
+ case GAUDI2_DCORE1_ENGINE_ID_EDMA_0 ... GAUDI2_DCORE1_ENGINE_ID_EDMA_1:
+ case GAUDI2_DCORE2_ENGINE_ID_EDMA_0 ... GAUDI2_DCORE2_ENGINE_ID_EDMA_1:
+ case GAUDI2_DCORE3_ENGINE_ID_EDMA_0 ... GAUDI2_DCORE3_ENGINE_ID_EDMA_1:
+ rc = gaudi2_set_edma_engine_mode(hdev, engine_ids[i], engine_command);
+ if (rc)
+ return rc;
+
+ break;
+ default:
+ dev_err(hdev->dev, "Invalid engine ID %u\n", engine_ids[i]);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int gaudi2_set_engines(struct hl_device *hdev, u32 *engine_ids,
+ u32 num_engines, u32 engine_command)
+{
+ switch (engine_command) {
+ case HL_ENGINE_CORE_HALT:
+ case HL_ENGINE_CORE_RUN:
+ return gaudi2_set_engine_cores(hdev, engine_ids, num_engines, engine_command);
+
+ case HL_ENGINE_STALL:
+ case HL_ENGINE_RESUME:
+ return gaudi2_set_engine_modes(hdev, engine_ids, num_engines, engine_command);
+
+ default:
+ dev_err(hdev->dev, "failed to execute command id %u\n", engine_command);
+ return -EINVAL;
+ }
+}
+
static void gaudi2_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
u32 wait_timeout_ms;
@@ -5509,11 +6039,10 @@ static void gaudi2_send_hard_reset_cmd(struct hl_device *hdev)
* gaudi2_execute_hard_reset - execute hard reset by driver/FW
*
* @hdev: pointer to the habanalabs device structure
- * @reset_sleep_ms: sleep time in msec after reset
*
 * This function executes a hard reset, performed by either the driver or the FW
*/
-static void gaudi2_execute_hard_reset(struct hl_device *hdev, u32 reset_sleep_ms)
+static void gaudi2_execute_hard_reset(struct hl_device *hdev)
{
if (hdev->asic_prop.hard_reset_done_by_fw) {
gaudi2_send_hard_reset_cmd(hdev);
@@ -5531,17 +6060,37 @@ static void gaudi2_execute_hard_reset(struct hl_device *hdev, u32 reset_sleep_ms
WREG32(mmPSOC_RESET_CONF_SW_ALL_RST, 1);
}
+static int gaudi2_get_soft_rst_done_indication(struct hl_device *hdev, u32 poll_timeout_us)
+{
+ int i, rc = 0;
+ u32 reg_val;
+
+ for (i = 0 ; i < GAUDI2_RESET_POLL_CNT ; i++)
+ rc = hl_poll_timeout(
+ hdev,
+ mmCPU_RST_STATUS_TO_HOST,
+ reg_val,
+ reg_val == CPU_RST_STATUS_SOFT_RST_DONE,
+ 1000,
+ poll_timeout_us);
+
+ if (rc)
+ dev_err(hdev->dev, "Timeout while waiting for FW to complete soft reset (0x%x)\n",
+ reg_val);
+ return rc;
+}
+
/**
* gaudi2_execute_soft_reset - execute soft reset by driver/FW
*
* @hdev: pointer to the habanalabs device structure
- * @reset_sleep_ms: sleep time in msec after reset
* @driver_performs_reset: true if driver should perform reset instead of f/w.
+ * @poll_timeout_us: time to wait for response from f/w.
*
 * This function executes a soft reset, performed by either the driver or the FW
*/
-static void gaudi2_execute_soft_reset(struct hl_device *hdev, u32 reset_sleep_ms,
- bool driver_performs_reset)
+static int gaudi2_execute_soft_reset(struct hl_device *hdev, bool driver_performs_reset,
+ u32 poll_timeout_us)
{
struct cpu_dyn_regs *dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
@@ -5554,7 +6103,8 @@ static void gaudi2_execute_soft_reset(struct hl_device *hdev, u32 reset_sleep_ms
WREG32(le32_to_cpu(dyn_regs->gic_host_soft_rst_irq),
gaudi2_irq_map_table[GAUDI2_EVENT_CPU_SOFT_RESET].cpu_id);
- return;
+
+ return gaudi2_get_soft_rst_done_indication(hdev, poll_timeout_us);
}
/* Block access to engines, QMANs and SM during reset, these
@@ -5569,17 +6119,14 @@ static void gaudi2_execute_soft_reset(struct hl_device *hdev, u32 reset_sleep_ms
mmPCIE_VDEC1_MSTR_IF_RR_SHRD_HBW_BASE + HL_BLOCK_SIZE);
WREG32(mmPSOC_RESET_CONF_SOFT_RST, 1);
+ return 0;
}
-static void gaudi2_poll_btm_indication(struct hl_device *hdev, u32 reset_sleep_ms,
- u32 poll_timeout_us)
+static void gaudi2_poll_btm_indication(struct hl_device *hdev, u32 poll_timeout_us)
{
int i, rc = 0;
u32 reg_val;
- /* without this sleep reset will not work */
- msleep(reset_sleep_ms);
-
/* We poll the BTM done indication multiple times after reset due to
* a HW errata 'GAUDI2_0300'
*/
@@ -5596,30 +6143,12 @@ static void gaudi2_poll_btm_indication(struct hl_device *hdev, u32 reset_sleep_m
dev_err(hdev->dev, "Timeout while waiting for device to reset 0x%x\n", reg_val);
}
-static void gaudi2_get_soft_rst_done_indication(struct hl_device *hdev, u32 poll_timeout_us)
-{
- int i, rc = 0;
- u32 reg_val;
-
- for (i = 0 ; i < GAUDI2_RESET_POLL_CNT ; i++)
- rc = hl_poll_timeout(
- hdev,
- mmCPU_RST_STATUS_TO_HOST,
- reg_val,
- reg_val == CPU_RST_STATUS_SOFT_RST_DONE,
- 1000,
- poll_timeout_us);
-
- if (rc)
- dev_err(hdev->dev, "Timeout while waiting for FW to complete soft reset (0x%x)\n",
- reg_val);
-}
-
-static void gaudi2_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
+static int gaudi2_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 poll_timeout_us, reset_sleep_ms;
bool driver_performs_reset = false;
+ int rc;
if (hdev->pldm) {
reset_sleep_ms = hard_reset ? GAUDI2_PLDM_HRESET_TIMEOUT_MSEC :
@@ -5637,7 +6166,7 @@ static void gaudi2_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_rese
if (hard_reset) {
driver_performs_reset = !hdev->asic_prop.hard_reset_done_by_fw;
- gaudi2_execute_hard_reset(hdev, reset_sleep_ms);
+ gaudi2_execute_hard_reset(hdev);
} else {
/*
* As we have to support also work with preboot only (which does not supports
@@ -5647,11 +6176,13 @@ static void gaudi2_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_rese
*/
driver_performs_reset = (hdev->fw_components == FW_TYPE_PREBOOT_CPU &&
!hdev->asic_prop.fw_security_enabled);
- gaudi2_execute_soft_reset(hdev, reset_sleep_ms, driver_performs_reset);
+ rc = gaudi2_execute_soft_reset(hdev, driver_performs_reset, poll_timeout_us);
+ if (rc)
+ return rc;
}
skip_reset:
- if (driver_performs_reset || hard_reset)
+ if (driver_performs_reset || hard_reset) {
/*
* Instead of waiting for BTM indication we should wait for preboot ready:
* Consider the below scenario:
@@ -5671,17 +6202,18 @@ skip_reset:
	 * communicate with the FW while it is in reset.
	 * To overcome this, we always wait for the preboot-ready indication.
*/
- if ((hdev->fw_components & FW_TYPE_PREBOOT_CPU)) {
- msleep(reset_sleep_ms);
+
+		/* without this sleep, the reset will not work */
+ msleep(reset_sleep_ms);
+
+ if (hdev->fw_components & FW_TYPE_PREBOOT_CPU)
hl_fw_wait_preboot_ready(hdev);
- } else {
- gaudi2_poll_btm_indication(hdev, reset_sleep_ms, poll_timeout_us);
- }
- else
- gaudi2_get_soft_rst_done_indication(hdev, poll_timeout_us);
+ else
+ gaudi2_poll_btm_indication(hdev, poll_timeout_us);
+ }
if (!gaudi2)
- return;
+ return 0;
gaudi2->dec_hw_cap_initialized &= ~(HW_CAP_DEC_MASK);
gaudi2->tpc_hw_cap_initialized &= ~(HW_CAP_TPC_MASK);
@@ -5708,6 +6240,7 @@ skip_reset:
HW_CAP_PDMA_MASK | HW_CAP_EDMA_MASK | HW_CAP_MME_MASK |
HW_CAP_ROT_MASK);
}
+ return 0;
}
static int gaudi2_suspend(struct hl_device *hdev)
@@ -6369,12 +6902,16 @@ static int gaudi2_compute_reset_late_init(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
size_t irq_arr_size;
+ int rc;
- /* TODO: missing gaudi2_nic_resume.
- * Until implemented nic_hw_cap_initialized will remain zeroed
- */
gaudi2_init_arcs(hdev);
- gaudi2_scrub_arcs_dccm(hdev);
+
+ rc = gaudi2_scrub_arcs_dccm(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to scrub arcs DCCM\n");
+ return rc;
+ }
+
gaudi2_init_security(hdev);
/* Unmask all IRQs since some could have been received during the soft reset */
@@ -6382,74 +6919,21 @@ static int gaudi2_compute_reset_late_init(struct hl_device *hdev)
return hl_fw_unmask_irq_arr(hdev, gaudi2->hw_events, irq_arr_size);
}
-static void gaudi2_is_tpc_engine_idle(struct hl_device *hdev, int dcore, int inst, u32 offset,
- struct iterate_module_ctx *ctx)
-{
- struct gaudi2_tpc_idle_data *idle_data = ctx->data;
- u32 tpc_cfg_sts, qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts;
- bool is_eng_idle;
- int engine_idx;
-
- if ((dcore == 0) && (inst == (NUM_DCORE0_TPC - 1)))
- engine_idx = GAUDI2_DCORE0_ENGINE_ID_TPC_6;
- else
- engine_idx = GAUDI2_DCORE0_ENGINE_ID_TPC_0 +
- dcore * GAUDI2_ENGINE_ID_DCORE_OFFSET + inst;
-
- tpc_cfg_sts = RREG32(mmDCORE0_TPC0_CFG_STATUS + offset);
- qm_glbl_sts0 = RREG32(mmDCORE0_TPC0_QM_GLBL_STS0 + offset);
- qm_glbl_sts1 = RREG32(mmDCORE0_TPC0_QM_GLBL_STS1 + offset);
- qm_cgm_sts = RREG32(mmDCORE0_TPC0_QM_CGM_STS + offset);
-
- is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts) &&
- IS_TPC_IDLE(tpc_cfg_sts);
- *(idle_data->is_idle) &= is_eng_idle;
-
- if (idle_data->mask && !is_eng_idle)
- set_bit(engine_idx, idle_data->mask);
-
- if (idle_data->e)
- hl_engine_data_sprintf(idle_data->e,
- idle_data->tpc_fmt, dcore, inst,
- is_eng_idle ? "Y" : "N",
- qm_glbl_sts0, qm_cgm_sts, tpc_cfg_sts);
-}
-
-static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
- struct engines_data *e)
+static bool gaudi2_get_edma_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e)
{
- u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts, dma_core_idle_ind_mask,
- mme_arch_sts, dec_swreg15, dec_enabled_bit;
+ u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts, dma_core_sts0, dma_core_sts1;
struct asic_fixed_properties *prop = &hdev->asic_prop;
- const char *rot_fmt = "%-6d%-5d%-9s%#-14x%#-12x%s\n";
unsigned long *mask = (unsigned long *) mask_arr;
- const char *edma_fmt = "%-6d%-6d%-9s%#-14x%#x\n";
- const char *mme_fmt = "%-5d%-6s%-9s%#-14x%#x\n";
- const char *nic_fmt = "%-5d%-9s%#-14x%#-12x\n";
- const char *pdma_fmt = "%-6d%-9s%#-14x%#x\n";
- const char *pcie_dec_fmt = "%-10d%-9s%#x\n";
- const char *dec_fmt = "%-6d%-5d%-9s%#x\n";
+ const char *edma_fmt = "%-6d%-6d%-9s%#-14x%#-15x%#x\n";
bool is_idle = true, is_eng_idle;
- u64 offset;
-
- struct gaudi2_tpc_idle_data tpc_idle_data = {
- .tpc_fmt = "%-6d%-5d%-9s%#-14x%#-12x%#x\n",
- .e = e,
- .mask = mask,
- .is_idle = &is_idle,
- };
- struct iterate_module_ctx tpc_iter = {
- .fn = &gaudi2_is_tpc_engine_idle,
- .data = &tpc_idle_data,
- };
-
int engine_idx, i, j;
+ u64 offset;
- /* EDMA, Two engines per Dcore */
if (e)
hl_engine_data_sprintf(e,
- "\nCORE EDMA is_idle QM_GLBL_STS0 DMA_CORE_IDLE_IND_MASK\n"
- "---- ---- ------- ------------ ----------------------\n");
+ "\nCORE EDMA is_idle QM_GLBL_STS0 DMA_CORE_STS0 DMA_CORE_STS1\n"
+ "---- ---- ------- ------------ ------------- -------------\n");
for (i = 0; i < NUM_OF_DCORES; i++) {
for (j = 0 ; j < NUM_OF_EDMA_PER_DCORE ; j++) {
@@ -6462,45 +6946,56 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask
i * GAUDI2_ENGINE_ID_DCORE_OFFSET + j;
offset = i * DCORE_OFFSET + j * DCORE_EDMA_OFFSET;
- dma_core_idle_ind_mask =
- RREG32(mmDCORE0_EDMA0_CORE_IDLE_IND_MASK + offset);
+ dma_core_sts0 = RREG32(mmDCORE0_EDMA0_CORE_STS0 + offset);
+ dma_core_sts1 = RREG32(mmDCORE0_EDMA0_CORE_STS1 + offset);
qm_glbl_sts0 = RREG32(mmDCORE0_EDMA0_QM_GLBL_STS0 + offset);
qm_glbl_sts1 = RREG32(mmDCORE0_EDMA0_QM_GLBL_STS1 + offset);
qm_cgm_sts = RREG32(mmDCORE0_EDMA0_QM_CGM_STS + offset);
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts) &&
- IS_DMA_IDLE(dma_core_idle_ind_mask);
+ IS_DMA_IDLE(dma_core_sts0) && !IS_DMA_HALTED(dma_core_sts1);
is_idle &= is_eng_idle;
if (mask && !is_eng_idle)
set_bit(engine_idx, mask);
if (e)
- hl_engine_data_sprintf(e, edma_fmt, i, j,
- is_eng_idle ? "Y" : "N",
- qm_glbl_sts0,
- dma_core_idle_ind_mask);
+ hl_engine_data_sprintf(e, edma_fmt, i, j, is_eng_idle ? "Y" : "N",
+ qm_glbl_sts0, dma_core_sts0, dma_core_sts1);
}
}
- /* PDMA, Two engines in Full chip */
+ return is_idle;
+}
+
+static bool gaudi2_get_pdma_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e)
+{
+ u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts, dma_core_sts0, dma_core_sts1;
+ unsigned long *mask = (unsigned long *) mask_arr;
+ const char *pdma_fmt = "%-6d%-9s%#-14x%#-15x%#x\n";
+ bool is_idle = true, is_eng_idle;
+ int engine_idx, i;
+ u64 offset;
+
if (e)
hl_engine_data_sprintf(e,
- "\nPDMA is_idle QM_GLBL_STS0 DMA_CORE_IDLE_IND_MASK\n"
- "---- ------- ------------ ----------------------\n");
+ "\nPDMA is_idle QM_GLBL_STS0 DMA_CORE_STS0 DMA_CORE_STS1\n"
+ "---- ------- ------------ ------------- -------------\n");
for (i = 0 ; i < NUM_OF_PDMA ; i++) {
engine_idx = GAUDI2_ENGINE_ID_PDMA_0 + i;
offset = i * PDMA_OFFSET;
- dma_core_idle_ind_mask = RREG32(mmPDMA0_CORE_IDLE_IND_MASK + offset);
+ dma_core_sts0 = RREG32(mmPDMA0_CORE_STS0 + offset);
+ dma_core_sts1 = RREG32(mmPDMA0_CORE_STS1 + offset);
qm_glbl_sts0 = RREG32(mmPDMA0_QM_GLBL_STS0 + offset);
qm_glbl_sts1 = RREG32(mmPDMA0_QM_GLBL_STS1 + offset);
qm_cgm_sts = RREG32(mmPDMA0_QM_CGM_STS + offset);
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts) &&
- IS_DMA_IDLE(dma_core_idle_ind_mask);
+ IS_DMA_IDLE(dma_core_sts0) && !IS_DMA_HALTED(dma_core_sts1);
is_idle &= is_eng_idle;
if (mask && !is_eng_idle)
@@ -6508,9 +7003,22 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask
if (e)
hl_engine_data_sprintf(e, pdma_fmt, i, is_eng_idle ? "Y" : "N",
- qm_glbl_sts0, dma_core_idle_ind_mask);
+ qm_glbl_sts0, dma_core_sts0, dma_core_sts1);
}
+ return is_idle;
+}
+
+static bool gaudi2_get_nic_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e)
+{
+ unsigned long *mask = (unsigned long *) mask_arr;
+ const char *nic_fmt = "%-5d%-9s%#-14x%#-12x\n";
+ u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts;
+ bool is_idle = true, is_eng_idle;
+ int engine_idx, i;
+ u64 offset = 0;
+
/* NIC, twelve macros in Full chip */
if (e && hdev->nic_ports_mask)
hl_engine_data_sprintf(e,
@@ -6544,6 +7052,19 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask
qm_glbl_sts0, qm_cgm_sts);
}
+ return is_idle;
+}
+
+static bool gaudi2_get_mme_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e)
+{
+ u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts, mme_arch_sts;
+ unsigned long *mask = (unsigned long *) mask_arr;
+ const char *mme_fmt = "%-5d%-6s%-9s%#-14x%#x\n";
+ bool is_idle = true, is_eng_idle;
+ int engine_idx, i;
+ u64 offset;
+
if (e)
hl_engine_data_sprintf(e,
"\nMME Stub is_idle QM_GLBL_STS0 MME_ARCH_STATUS\n"
@@ -6574,16 +7095,82 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask
set_bit(engine_idx, mask);
}
- /*
- * TPC
- */
+ return is_idle;
+}
+
+static void gaudi2_is_tpc_engine_idle(struct hl_device *hdev, int dcore, int inst, u32 offset,
+ struct iterate_module_ctx *ctx)
+{
+ struct gaudi2_tpc_idle_data *idle_data = ctx->data;
+ u32 tpc_cfg_sts, qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts;
+ bool is_eng_idle;
+ int engine_idx;
+
+ if ((dcore == 0) && (inst == (NUM_DCORE0_TPC - 1)))
+ engine_idx = GAUDI2_DCORE0_ENGINE_ID_TPC_6;
+ else
+ engine_idx = GAUDI2_DCORE0_ENGINE_ID_TPC_0 +
+ dcore * GAUDI2_ENGINE_ID_DCORE_OFFSET + inst;
+
+ tpc_cfg_sts = RREG32(mmDCORE0_TPC0_CFG_STATUS + offset);
+ qm_glbl_sts0 = RREG32(mmDCORE0_TPC0_QM_GLBL_STS0 + offset);
+ qm_glbl_sts1 = RREG32(mmDCORE0_TPC0_QM_GLBL_STS1 + offset);
+ qm_cgm_sts = RREG32(mmDCORE0_TPC0_QM_CGM_STS + offset);
+
+ is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts) &&
+ IS_TPC_IDLE(tpc_cfg_sts);
+ *(idle_data->is_idle) &= is_eng_idle;
+
+ if (idle_data->mask && !is_eng_idle)
+ set_bit(engine_idx, idle_data->mask);
+
+ if (idle_data->e)
+ hl_engine_data_sprintf(idle_data->e,
+ idle_data->tpc_fmt, dcore, inst,
+ is_eng_idle ? "Y" : "N",
+ qm_glbl_sts0, qm_cgm_sts, tpc_cfg_sts);
+}
+
+static bool gaudi2_get_tpc_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ unsigned long *mask = (unsigned long *) mask_arr;
+ bool is_idle = true;
+
+ struct gaudi2_tpc_idle_data tpc_idle_data = {
+ .tpc_fmt = "%-6d%-5d%-9s%#-14x%#-12x%#x\n",
+ .e = e,
+ .mask = mask,
+ .is_idle = &is_idle,
+ };
+ struct iterate_module_ctx tpc_iter = {
+ .fn = &gaudi2_is_tpc_engine_idle,
+ .data = &tpc_idle_data,
+ };
+
if (e && prop->tpc_enabled_mask)
hl_engine_data_sprintf(e,
- "\nCORE TPC is_idle QM_GLBL_STS0 QM_CGM_STS DMA_CORE_IDLE_IND_MASK\n"
- "---- --- -------- ------------ ---------- ----------------------\n");
+ "\nCORE TPC is_idle QM_GLBL_STS0 QM_CGM_STS STATUS\n"
+ "---- --- ------- ------------ ---------- ------\n");
gaudi2_iterate_tpcs(hdev, &tpc_iter);
+	return *tpc_idle_data.is_idle;
+}
+
+static bool gaudi2_get_decoder_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ unsigned long *mask = (unsigned long *) mask_arr;
+ const char *pcie_dec_fmt = "%-10d%-9s%#x\n";
+ const char *dec_fmt = "%-6d%-5d%-9s%#x\n";
+ bool is_idle = true, is_eng_idle;
+ u32 dec_swreg15, dec_enabled_bit;
+ int engine_idx, i, j;
+ u64 offset;
+
/* Decoders, two each Dcore and two shared PCIe decoders */
if (e && (prop->decoder_enabled_mask & (~PCIE_DEC_EN_MASK)))
hl_engine_data_sprintf(e,
@@ -6638,10 +7225,23 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask
is_eng_idle ? "Y" : "N", dec_swreg15);
}
+ return is_idle;
+}
+
+static bool gaudi2_get_rotator_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e)
+{
+ const char *rot_fmt = "%-6d%-5d%-9s%#-14x%#-14x%#x\n";
+ unsigned long *mask = (unsigned long *) mask_arr;
+ u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts;
+ bool is_idle = true, is_eng_idle;
+ int engine_idx, i;
+ u64 offset;
+
if (e)
hl_engine_data_sprintf(e,
- "\nCORE ROT is_idle QM_GLBL_STS0 QM_CGM_STS DMA_CORE_STS0\n"
- "---- ---- ------- ------------ ---------- -------------\n");
+ "\nCORE ROT is_idle QM_GLBL_STS0 QM_GLBL_STS1 QM_CGM_STS\n"
+ "---- --- ------- ------------ ------------ ----------\n");
for (i = 0 ; i < NUM_OF_ROT ; i++) {
engine_idx = GAUDI2_ENGINE_ID_ROT_0 + i;
@@ -6660,12 +7260,28 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask
if (e)
hl_engine_data_sprintf(e, rot_fmt, i, 0, is_eng_idle ? "Y" : "N",
- qm_glbl_sts0, qm_cgm_sts, "-");
+ qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts);
}
return is_idle;
}
+static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e)
+{
+ bool is_idle = true;
+
+ is_idle &= gaudi2_get_edma_idle_status(hdev, mask_arr, mask_len, e);
+ is_idle &= gaudi2_get_pdma_idle_status(hdev, mask_arr, mask_len, e);
+ is_idle &= gaudi2_get_nic_idle_status(hdev, mask_arr, mask_len, e);
+ is_idle &= gaudi2_get_mme_idle_status(hdev, mask_arr, mask_len, e);
+ is_idle &= gaudi2_get_tpc_idle_status(hdev, mask_arr, mask_len, e);
+ is_idle &= gaudi2_get_decoder_idle_status(hdev, mask_arr, mask_len, e);
+ is_idle &= gaudi2_get_rotator_idle_status(hdev, mask_arr, mask_len, e);
+
+ return is_idle;
+}
+
static void gaudi2_hw_queues_lock(struct hl_device *hdev)
__acquires(&gaudi2->hw_queues_lock)
{
@@ -7040,7 +7656,7 @@ static bool gaudi2_handle_ecc_event(struct hl_device *hdev, u16 event_type,
memory_wrapper_idx = ecc_data->memory_wrapper_idx;
gaudi2_print_event(hdev, event_type, !ecc_data->is_critical,
- "ECC error detected. address: %#llx. Syndrom: %#llx. block id %u. critical %u.\n",
+		"ECC error detected. address: %#llx. Syndrome: %#llx. block id %u. critical %u.",
ecc_address, ecc_syndrom, memory_wrapper_idx, ecc_data->is_critical);
return !!ecc_data->is_critical;
@@ -7352,10 +7968,8 @@ static void gaudi2_ack_module_razwi_event_handler(struct hl_device *hdev,
case RAZWI_TPC:
hbw_rtr_id = gaudi2_tpc_initiator_hbw_rtr_id[module_idx];
- /* TODO : remove this check and depend only on tpc routers table
- * when SW-118828 is resolved
- */
- if (!hdev->asic_prop.fw_security_enabled &&
+ if (hl_is_fw_ver_below_1_9(hdev) &&
+ !hdev->asic_prop.fw_security_enabled &&
((module_idx == 0) || (module_idx == 1)))
lbw_rtr_id = DCORE0_RTR0;
else
@@ -7526,297 +8140,115 @@ static void gaudi2_check_if_razwi_happened(struct hl_device *hdev)
gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, mod_idx, 0, NULL);
}
-static const char *gaudi2_get_initiators_name(u32 rtr_id)
-{
- switch (rtr_id) {
- case DCORE0_RTR0:
- return "DEC0/1/8/9, TPC24, PDMA0/1, PMMU, PCIE_IF, EDMA0/2, HMMU0/2/4/6, CPU";
- case DCORE0_RTR1:
- return "TPC0/1";
- case DCORE0_RTR2:
- return "TPC2/3";
- case DCORE0_RTR3:
- return "TPC4/5";
- case DCORE0_RTR4:
- return "MME0_SBTE0/1";
- case DCORE0_RTR5:
- return "MME0_WAP0/SBTE2";
- case DCORE0_RTR6:
- return "MME0_CTRL_WR/SBTE3";
- case DCORE0_RTR7:
- return "MME0_WAP1/CTRL_RD/SBTE4";
- case DCORE1_RTR0:
- return "MME1_WAP1/CTRL_RD/SBTE4";
- case DCORE1_RTR1:
- return "MME1_CTRL_WR/SBTE3";
- case DCORE1_RTR2:
- return "MME1_WAP0/SBTE2";
- case DCORE1_RTR3:
- return "MME1_SBTE0/1";
- case DCORE1_RTR4:
- return "TPC10/11";
- case DCORE1_RTR5:
- return "TPC8/9";
- case DCORE1_RTR6:
- return "TPC6/7";
- case DCORE1_RTR7:
- return "DEC2/3, NIC0/1/2/3/4, ARC_FARM, KDMA, EDMA1/3, HMMU1/3/5/7";
- case DCORE2_RTR0:
- return "DEC4/5, NIC5/6/7/8, EDMA4/6, HMMU8/10/12/14, ROT0";
- case DCORE2_RTR1:
- return "TPC16/17";
- case DCORE2_RTR2:
- return "TPC14/15";
- case DCORE2_RTR3:
- return "TPC12/13";
- case DCORE2_RTR4:
- return "MME2_SBTE0/1";
- case DCORE2_RTR5:
- return "MME2_WAP0/SBTE2";
- case DCORE2_RTR6:
- return "MME2_CTRL_WR/SBTE3";
- case DCORE2_RTR7:
- return "MME2_WAP1/CTRL_RD/SBTE4";
- case DCORE3_RTR0:
- return "MME3_WAP1/CTRL_RD/SBTE4";
- case DCORE3_RTR1:
- return "MME3_CTRL_WR/SBTE3";
- case DCORE3_RTR2:
- return "MME3_WAP0/SBTE2";
- case DCORE3_RTR3:
- return "MME3_SBTE0/1";
- case DCORE3_RTR4:
- return "TPC18/19";
- case DCORE3_RTR5:
- return "TPC20/21";
- case DCORE3_RTR6:
- return "TPC22/23";
- case DCORE3_RTR7:
- return "DEC6/7, NIC9/10/11, EDMA5/7, HMMU9/11/13/15, ROT1, PSOC";
- default:
- return "N/A";
- }
-}
-
-static u16 gaudi2_get_razwi_initiators(u32 rtr_id, u16 *engines)
-{
- switch (rtr_id) {
- case DCORE0_RTR0:
- engines[0] = GAUDI2_DCORE0_ENGINE_ID_DEC_0;
- engines[1] = GAUDI2_DCORE0_ENGINE_ID_DEC_1;
- engines[2] = GAUDI2_PCIE_ENGINE_ID_DEC_0;
- engines[3] = GAUDI2_PCIE_ENGINE_ID_DEC_1;
- engines[4] = GAUDI2_DCORE0_ENGINE_ID_TPC_6;
- engines[5] = GAUDI2_ENGINE_ID_PDMA_0;
- engines[6] = GAUDI2_ENGINE_ID_PDMA_1;
- engines[7] = GAUDI2_ENGINE_ID_PCIE;
- engines[8] = GAUDI2_DCORE0_ENGINE_ID_EDMA_0;
- engines[9] = GAUDI2_DCORE1_ENGINE_ID_EDMA_0;
- engines[10] = GAUDI2_ENGINE_ID_PSOC;
- return 11;
-
- case DCORE0_RTR1:
- engines[0] = GAUDI2_DCORE0_ENGINE_ID_TPC_0;
- engines[1] = GAUDI2_DCORE0_ENGINE_ID_TPC_1;
- return 2;
-
- case DCORE0_RTR2:
- engines[0] = GAUDI2_DCORE0_ENGINE_ID_TPC_2;
- engines[1] = GAUDI2_DCORE0_ENGINE_ID_TPC_3;
- return 2;
-
- case DCORE0_RTR3:
- engines[0] = GAUDI2_DCORE0_ENGINE_ID_TPC_4;
- engines[1] = GAUDI2_DCORE0_ENGINE_ID_TPC_5;
- return 2;
-
- case DCORE0_RTR4:
- case DCORE0_RTR5:
- case DCORE0_RTR6:
- case DCORE0_RTR7:
- engines[0] = GAUDI2_DCORE0_ENGINE_ID_MME;
- return 1;
-
- case DCORE1_RTR0:
- case DCORE1_RTR1:
- case DCORE1_RTR2:
- case DCORE1_RTR3:
- engines[0] = GAUDI2_DCORE1_ENGINE_ID_MME;
- return 1;
-
- case DCORE1_RTR4:
- engines[0] = GAUDI2_DCORE1_ENGINE_ID_TPC_4;
- engines[1] = GAUDI2_DCORE1_ENGINE_ID_TPC_5;
- return 2;
-
- case DCORE1_RTR5:
- engines[0] = GAUDI2_DCORE1_ENGINE_ID_TPC_2;
- engines[1] = GAUDI2_DCORE1_ENGINE_ID_TPC_3;
- return 2;
-
- case DCORE1_RTR6:
- engines[0] = GAUDI2_DCORE1_ENGINE_ID_TPC_0;
- engines[1] = GAUDI2_DCORE1_ENGINE_ID_TPC_1;
- return 2;
-
- case DCORE1_RTR7:
- engines[0] = GAUDI2_DCORE1_ENGINE_ID_DEC_0;
- engines[1] = GAUDI2_DCORE1_ENGINE_ID_DEC_1;
- engines[2] = GAUDI2_ENGINE_ID_NIC0_0;
- engines[3] = GAUDI2_ENGINE_ID_NIC1_0;
- engines[4] = GAUDI2_ENGINE_ID_NIC2_0;
- engines[5] = GAUDI2_ENGINE_ID_NIC3_0;
- engines[6] = GAUDI2_ENGINE_ID_NIC4_0;
- engines[7] = GAUDI2_ENGINE_ID_ARC_FARM;
- engines[8] = GAUDI2_ENGINE_ID_KDMA;
- engines[9] = GAUDI2_DCORE0_ENGINE_ID_EDMA_1;
- engines[10] = GAUDI2_DCORE1_ENGINE_ID_EDMA_1;
- return 11;
-
- case DCORE2_RTR0:
- engines[0] = GAUDI2_DCORE2_ENGINE_ID_DEC_0;
- engines[1] = GAUDI2_DCORE2_ENGINE_ID_DEC_1;
- engines[2] = GAUDI2_ENGINE_ID_NIC5_0;
- engines[3] = GAUDI2_ENGINE_ID_NIC6_0;
- engines[4] = GAUDI2_ENGINE_ID_NIC7_0;
- engines[5] = GAUDI2_ENGINE_ID_NIC8_0;
- engines[6] = GAUDI2_DCORE2_ENGINE_ID_EDMA_0;
- engines[7] = GAUDI2_DCORE3_ENGINE_ID_EDMA_0;
- engines[8] = GAUDI2_ENGINE_ID_ROT_0;
- return 9;
-
- case DCORE2_RTR1:
- engines[0] = GAUDI2_DCORE2_ENGINE_ID_TPC_4;
- engines[1] = GAUDI2_DCORE2_ENGINE_ID_TPC_5;
- return 2;
-
- case DCORE2_RTR2:
- engines[0] = GAUDI2_DCORE2_ENGINE_ID_TPC_2;
- engines[1] = GAUDI2_DCORE2_ENGINE_ID_TPC_3;
- return 2;
-
- case DCORE2_RTR3:
- engines[0] = GAUDI2_DCORE2_ENGINE_ID_TPC_0;
- engines[1] = GAUDI2_DCORE2_ENGINE_ID_TPC_1;
- return 2;
-
- case DCORE2_RTR4:
- case DCORE2_RTR5:
- case DCORE2_RTR6:
- case DCORE2_RTR7:
- engines[0] = GAUDI2_DCORE2_ENGINE_ID_MME;
- return 1;
- case DCORE3_RTR0:
- case DCORE3_RTR1:
- case DCORE3_RTR2:
- case DCORE3_RTR3:
- engines[0] = GAUDI2_DCORE3_ENGINE_ID_MME;
- return 1;
- case DCORE3_RTR4:
- engines[0] = GAUDI2_DCORE3_ENGINE_ID_TPC_0;
- engines[1] = GAUDI2_DCORE3_ENGINE_ID_TPC_1;
- return 2;
- case DCORE3_RTR5:
- engines[0] = GAUDI2_DCORE3_ENGINE_ID_TPC_2;
- engines[1] = GAUDI2_DCORE3_ENGINE_ID_TPC_3;
- return 2;
- case DCORE3_RTR6:
- engines[0] = GAUDI2_DCORE3_ENGINE_ID_TPC_4;
- engines[1] = GAUDI2_DCORE3_ENGINE_ID_TPC_5;
- return 2;
- case DCORE3_RTR7:
- engines[0] = GAUDI2_DCORE3_ENGINE_ID_DEC_0;
- engines[1] = GAUDI2_DCORE3_ENGINE_ID_DEC_1;
- engines[2] = GAUDI2_ENGINE_ID_NIC9_0;
- engines[3] = GAUDI2_ENGINE_ID_NIC10_0;
- engines[4] = GAUDI2_ENGINE_ID_NIC11_0;
- engines[5] = GAUDI2_DCORE2_ENGINE_ID_EDMA_1;
- engines[6] = GAUDI2_DCORE3_ENGINE_ID_EDMA_1;
- engines[7] = GAUDI2_ENGINE_ID_ROT_1;
- engines[8] = GAUDI2_ENGINE_ID_ROT_0;
- return 9;
- default:
- return 0;
- }
-}
-
-static void gaudi2_razwi_unmapped_addr_hbw_printf_info(struct hl_device *hdev, u32 rtr_id,
- u64 rtr_ctrl_base_addr, bool is_write,
- u64 *event_mask)
+static int gaudi2_psoc_razwi_get_engines(struct gaudi2_razwi_info *razwi_info, u32 array_size,
+ u32 axuser_xy, u32 *base, u16 *eng_id,
+ char *eng_name)
{
- u16 engines[HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR], num_of_eng;
- u32 razwi_hi, razwi_lo;
- u8 rd_wr_flag;
-
- num_of_eng = gaudi2_get_razwi_initiators(rtr_id, &engines[0]);
- if (is_write) {
- razwi_hi = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AW_ADDR_HI);
- razwi_lo = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AW_ADDR_LO);
- rd_wr_flag = HL_RAZWI_WRITE;
+ int i, num_of_eng = 0;
+ u16 str_size = 0;
- /* Clear set indication */
- WREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AW_SET, 0x1);
- } else {
- razwi_hi = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AR_ADDR_HI);
- razwi_lo = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AR_ADDR_LO);
- rd_wr_flag = HL_RAZWI_READ;
+ for (i = 0 ; i < array_size ; i++) {
+ if (axuser_xy != razwi_info[i].axuser_xy)
+ continue;
- /* Clear set indication */
- WREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AR_SET, 0x1);
+ eng_id[num_of_eng] = razwi_info[i].eng_id;
+ base[num_of_eng] = razwi_info[i].rtr_ctrl;
+ if (!num_of_eng)
+ str_size += snprintf(eng_name + str_size,
+ PSOC_RAZWI_ENG_STR_SIZE - str_size, "%s",
+ razwi_info[i].eng_name);
+ else
+ str_size += snprintf(eng_name + str_size,
+ PSOC_RAZWI_ENG_STR_SIZE - str_size, " or %s",
+ razwi_info[i].eng_name);
+ num_of_eng++;
}
- hl_handle_razwi(hdev, (u64)razwi_hi << 32 | razwi_lo, &engines[0], num_of_eng,
- rd_wr_flag | HL_RAZWI_HBW, event_mask);
- dev_err_ratelimited(hdev->dev,
- "RAZWI PSOC unmapped HBW %s error, rtr id %u, address %#llx\n",
- is_write ? "WR" : "RD", rtr_id, (u64)razwi_hi << 32 | razwi_lo);
-
- dev_err_ratelimited(hdev->dev,
- "Initiators: %s\n", gaudi2_get_initiators_name(rtr_id));
+ return num_of_eng;
}
-static void gaudi2_razwi_unmapped_addr_lbw_printf_info(struct hl_device *hdev, u32 rtr_id,
- u64 rtr_ctrl_base_addr, bool is_write,
- u64 *event_mask)
+static bool gaudi2_handle_psoc_razwi_happened(struct hl_device *hdev, u32 razwi_reg,
+ u64 *event_mask)
{
- u16 engines[HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR], num_of_eng;
- u64 razwi_addr = CFG_BASE;
- u8 rd_wr_flag;
+ u32 axuser_xy = RAZWI_GET_AXUSER_XY(razwi_reg), addr_hi = 0, addr_lo = 0;
+ u32 base[PSOC_RAZWI_MAX_ENG_PER_RTR];
+ u16 num_of_eng, eng_id[PSOC_RAZWI_MAX_ENG_PER_RTR];
+ char eng_name_str[PSOC_RAZWI_ENG_STR_SIZE];
+ bool razwi_happened = false;
+ int i;
- num_of_eng = gaudi2_get_razwi_initiators(rtr_id, &engines[0]);
+ num_of_eng = gaudi2_psoc_razwi_get_engines(common_razwi_info, ARRAY_SIZE(common_razwi_info),
+ axuser_xy, base, eng_id, eng_name_str);
- if (is_write) {
- razwi_addr += RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AW_ADDR);
- rd_wr_flag = HL_RAZWI_WRITE;
+ /* If no match for XY coordinates, try to find it in MME razwi table */
+ if (!num_of_eng) {
+ axuser_xy = RAZWI_GET_AXUSER_LOW_XY(razwi_reg);
+ num_of_eng = gaudi2_psoc_razwi_get_engines(mme_razwi_info,
+ ARRAY_SIZE(mme_razwi_info),
+ axuser_xy, base, eng_id,
+ eng_name_str);
+ }
- /* Clear set indication */
- WREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AW_SET, 0x1);
- } else {
- razwi_addr += RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AR_ADDR);
- rd_wr_flag = HL_RAZWI_READ;
+ for (i = 0 ; i < num_of_eng ; i++) {
+ if (RREG32(base[i] + DEC_RAZWI_HBW_AW_SET)) {
+ addr_hi = RREG32(base[i] + DEC_RAZWI_HBW_AW_ADDR_HI);
+ addr_lo = RREG32(base[i] + DEC_RAZWI_HBW_AW_ADDR_LO);
+ dev_err(hdev->dev,
+				"PSOC HBW AW RAZWI: %s, address (aligned to 128 bytes): 0x%llX\n",
+ eng_name_str, ((u64)addr_hi << 32) + addr_lo);
+ hl_handle_razwi(hdev, ((u64)addr_hi << 32) + addr_lo, &eng_id[0],
+ num_of_eng, HL_RAZWI_HBW | HL_RAZWI_WRITE, event_mask);
+ razwi_happened = true;
+ }
- /* Clear set indication */
- WREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AR_SET, 0x1);
- }
+ if (RREG32(base[i] + DEC_RAZWI_HBW_AR_SET)) {
+ addr_hi = RREG32(base[i] + DEC_RAZWI_HBW_AR_ADDR_HI);
+ addr_lo = RREG32(base[i] + DEC_RAZWI_HBW_AR_ADDR_LO);
+ dev_err(hdev->dev,
+				"PSOC HBW AR RAZWI: %s, address (aligned to 128 bytes): 0x%llX\n",
+ eng_name_str, ((u64)addr_hi << 32) + addr_lo);
+ hl_handle_razwi(hdev, ((u64)addr_hi << 32) + addr_lo, &eng_id[0],
+ num_of_eng, HL_RAZWI_HBW | HL_RAZWI_READ, event_mask);
+ razwi_happened = true;
+ }
- hl_handle_razwi(hdev, razwi_addr, &engines[0], num_of_eng, rd_wr_flag | HL_RAZWI_LBW,
- event_mask);
- dev_err_ratelimited(hdev->dev,
- "RAZWI PSOC unmapped LBW %s error, rtr id %u, address 0x%llX\n",
- is_write ? "WR" : "RD", rtr_id, razwi_addr);
+ if (RREG32(base[i] + DEC_RAZWI_LBW_AW_SET)) {
+ addr_lo = RREG32(base[i] + DEC_RAZWI_LBW_AW_ADDR);
+ dev_err(hdev->dev,
+				"PSOC LBW AW RAZWI: %s, address (aligned to 128 bytes): 0x%X\n",
+ eng_name_str, addr_lo);
+ hl_handle_razwi(hdev, addr_lo, &eng_id[0],
+ num_of_eng, HL_RAZWI_LBW | HL_RAZWI_WRITE, event_mask);
+ razwi_happened = true;
+ }
- dev_err_ratelimited(hdev->dev,
- "Initiators: %s\n", gaudi2_get_initiators_name(rtr_id));
+ if (RREG32(base[i] + DEC_RAZWI_LBW_AR_SET)) {
+ addr_lo = RREG32(base[i] + DEC_RAZWI_LBW_AR_ADDR);
+ dev_err(hdev->dev,
+				"PSOC LBW AR RAZWI: %s, address (aligned to 128 bytes): 0x%X\n",
+ eng_name_str, addr_lo);
+ hl_handle_razwi(hdev, addr_lo, &eng_id[0],
+ num_of_eng, HL_RAZWI_LBW | HL_RAZWI_READ, event_mask);
+ razwi_happened = true;
+ }
+		/* In the common case the loop breaks here, since there is either a single
+		 * engine id or several engines sharing the same router. The exception is a
+		 * PSOC razwi from EDMA, where the axuser id can match two routers (the two
+		 * interfaces of the sft router). In that case the first router might not hold
+		 * the info and we need to iterate to the other router.
+		 */
+ if (razwi_happened)
+ break;
+ }
+
+ return razwi_happened;
}
/* PSOC RAZWI interrupt occurs only when trying to access a bad address */
static int gaudi2_ack_psoc_razwi_event_handler(struct hl_device *hdev, u64 *event_mask)
{
- u32 hbw_aw_set, hbw_ar_set, lbw_aw_set, lbw_ar_set, rtr_id, dcore_id, dcore_rtr_id, xy,
- razwi_mask_info, razwi_intr = 0, error_count = 0;
- int rtr_map_arr_len = NUM_OF_RTR_PER_DCORE * NUM_OF_DCORES;
- u64 rtr_ctrl_base_addr;
+ u32 razwi_mask_info, razwi_intr = 0, error_count = 0;
if (hdev->pldm || !(hdev->fw_components & FW_TYPE_LINUX)) {
razwi_intr = RREG32(mmPSOC_GLOBAL_CONF_RAZWI_INTERRUPT);
@@ -7825,63 +8257,22 @@ static int gaudi2_ack_psoc_razwi_event_handler(struct hl_device *hdev, u64 *even
}
razwi_mask_info = RREG32(mmPSOC_GLOBAL_CONF_RAZWI_MASK_INFO);
- xy = FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_L_MASK, razwi_mask_info);
dev_err_ratelimited(hdev->dev,
"PSOC RAZWI interrupt: Mask %d, AR %d, AW %d, AXUSER_L 0x%x AXUSER_H 0x%x\n",
FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_MASK_MASK, razwi_mask_info),
FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AR_MASK, razwi_mask_info),
FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AW_MASK, razwi_mask_info),
- xy,
+ FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_L_MASK, razwi_mask_info),
FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_H_MASK, razwi_mask_info));
- if (xy == 0) {
- dev_err_ratelimited(hdev->dev,
- "PSOC RAZWI interrupt: received event from 0 rtr coordinates\n");
- goto clear;
- }
-
- /* Find router id by router coordinates */
- for (rtr_id = 0 ; rtr_id < rtr_map_arr_len ; rtr_id++)
- if (rtr_coordinates_to_rtr_id[rtr_id] == xy)
- break;
-
- if (rtr_id == rtr_map_arr_len) {
+ if (gaudi2_handle_psoc_razwi_happened(hdev, razwi_mask_info, event_mask))
+ error_count++;
+ else
dev_err_ratelimited(hdev->dev,
- "PSOC RAZWI interrupt: invalid rtr coordinates (0x%x)\n", xy);
- goto clear;
- }
-
- /* Find router mstr_if register base */
- dcore_id = rtr_id / NUM_OF_RTR_PER_DCORE;
- dcore_rtr_id = rtr_id % NUM_OF_RTR_PER_DCORE;
- rtr_ctrl_base_addr = mmDCORE0_RTR0_CTRL_BASE + dcore_id * DCORE_OFFSET +
- dcore_rtr_id * DCORE_RTR_OFFSET;
-
- hbw_aw_set = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AW_SET);
- hbw_ar_set = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AR_SET);
- lbw_aw_set = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AW_SET);
- lbw_ar_set = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AR_SET);
-
- if (hbw_aw_set)
- gaudi2_razwi_unmapped_addr_hbw_printf_info(hdev, rtr_id,
- rtr_ctrl_base_addr, true, event_mask);
-
- if (hbw_ar_set)
- gaudi2_razwi_unmapped_addr_hbw_printf_info(hdev, rtr_id,
- rtr_ctrl_base_addr, false, event_mask);
+ "PSOC RAZWI interrupt: invalid razwi info (0x%x)\n",
+ razwi_mask_info);
- if (lbw_aw_set)
- gaudi2_razwi_unmapped_addr_lbw_printf_info(hdev, rtr_id,
- rtr_ctrl_base_addr, true, event_mask);
-
- if (lbw_ar_set)
- gaudi2_razwi_unmapped_addr_lbw_printf_info(hdev, rtr_id,
- rtr_ctrl_base_addr, false, event_mask);
-
- error_count++;
-
-clear:
/* Clear Interrupts only on pldm or if f/w doesn't handle interrupts */
if (hdev->pldm || !(hdev->fw_components & FW_TYPE_LINUX))
WREG32(mmPSOC_GLOBAL_CONF_RAZWI_INTERRUPT, razwi_intr);
@@ -7976,7 +8367,7 @@ static int gaudi2_handle_qman_err(struct hl_device *hdev, u16 event_type, u64 *e
{
u32 qid_base, error_count = 0;
u64 qman_base;
- u8 index;
+ u8 index = 0;
switch (event_type) {
case GAUDI2_EVENT_TPC0_QM ... GAUDI2_EVENT_TPC5_QM:
@@ -8318,14 +8709,13 @@ static int gaudi2_handle_kdma_core_event(struct hl_device *hdev, u16 event_type,
return error_count;
}
-static int gaudi2_handle_dma_core_event(struct hl_device *hdev, u16 event_type,
- u64 intr_cause_data)
+static int gaudi2_handle_dma_core_event(struct hl_device *hdev, u16 event_type, int sts_addr)
{
- u32 error_count = 0;
+ u32 error_count = 0, sts_val = RREG32(sts_addr);
int i;
for (i = 0 ; i < GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE ; i++)
- if (intr_cause_data & BIT(i)) {
+ if (sts_val & BIT(i)) {
gaudi2_print_event(hdev, event_type, true,
"err cause: %s", gaudi2_dma_core_interrupts_cause[i]);
error_count++;
@@ -8336,6 +8726,27 @@ static int gaudi2_handle_dma_core_event(struct hl_device *hdev, u16 event_type,
return error_count;
}
+static int gaudi2_handle_pdma_core_event(struct hl_device *hdev, u16 event_type, int pdma_idx)
+{
+ u32 sts_addr;
+
+ sts_addr = mmPDMA0_CORE_ERR_CAUSE + pdma_idx * PDMA_OFFSET;
+ return gaudi2_handle_dma_core_event(hdev, event_type, sts_addr);
+}
+
+static int gaudi2_handle_edma_core_event(struct hl_device *hdev, u16 event_type, int edma_idx)
+{
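+	/* Assumption: the HDMA core event order differs from the DCORE/EDMA register layout, hence the remap */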
+ static const int edma_event_index_map[] = {2, 3, 0, 1, 6, 7, 4, 5};
+ u32 sts_addr, index;
+
+ index = edma_event_index_map[edma_idx];
+
+ sts_addr = mmDCORE0_EDMA0_CORE_ERR_CAUSE +
+ DCORE_OFFSET * (index / NUM_OF_EDMA_PER_DCORE) +
+ DCORE_EDMA_OFFSET * (index % NUM_OF_EDMA_PER_DCORE);
+ return gaudi2_handle_dma_core_event(hdev, event_type, sts_addr);
+}
+
static void gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(struct hl_device *hdev, u64 *event_mask)
{
u32 mstr_if_base_addr = mmPCIE_MSTR_RR_MSTR_IF_RR_SHRD_HBW_BASE, razwi_happened_addr;
@@ -8453,7 +8864,7 @@ static void gaudi2_handle_page_error(struct hl_device *hdev, u64 mmu_base, bool
is_pmmu ? "PMMU" : "HMMU", addr, ((u64)axid_h << 32) + axid_l);
hl_handle_page_fault(hdev, addr, 0, is_pmmu, event_mask);
- WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE), 0);
+ WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID), 0);
}
static void gaudi2_handle_access_error(struct hl_device *hdev, u64 mmu_base, bool is_pmmu)
@@ -8534,7 +8945,7 @@ static int gaudi2_handle_sm_err(struct hl_device *hdev, u16 event_type, u8 sm_in
continue;
gaudi2_print_event(hdev, event_type, true,
- "err cause: %s. %s: 0x%X\n",
+ "err cause: %s. %s: 0x%X",
gaudi2_sm_sei_cause[i].cause_name,
gaudi2_sm_sei_cause[i].log_name,
sei_cause_log);
@@ -8740,12 +9151,12 @@ static bool gaudi2_handle_hbm_mc_sei_err(struct hl_device *hdev, u16 event_type,
if (cause_idx > GAUDI2_NUM_OF_HBM_SEI_CAUSE - 1) {
gaudi2_print_event(hdev, event_type, true,
"err cause: %s",
- "Invalid HBM SEI event cause (%d) provided by FW\n", cause_idx);
+ "Invalid HBM SEI event cause (%d) provided by FW", cause_idx);
return true;
}
gaudi2_print_event(hdev, event_type, !sei_data->hdr.is_critical,
- "System %s Error Interrupt - HBM(%u) MC(%u) MC_CH(%u) MC_PC(%u). Error cause: %s\n",
+ "System %s Error Interrupt - HBM(%u) MC(%u) MC_CH(%u) MC_PC(%u). Error cause: %s",
sei_data->hdr.is_critical ? "Critical" : "Non-critical",
hbm_id, mc_id, sei_data->hdr.mc_channel, sei_data->hdr.mc_pseudo_channel,
hbm_mc_sei_cause[cause_idx]);
@@ -8869,7 +9280,7 @@ static void gaudi2_print_out_of_sync_info(struct hl_device *hdev, u16 event_type
struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ];
gaudi2_print_event(hdev, event_type, false,
- "FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n",
+ "FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d",
le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci),
q->pi, atomic_read(&q->ci));
}
@@ -8883,7 +9294,7 @@ static int gaudi2_handle_pcie_p2p_msix(struct hl_device *hdev, u16 event_type)
if (p2p_intr) {
gaudi2_print_event(hdev, event_type, true,
- "pcie p2p transaction terminated due to security, req_id(0x%x)\n",
+ "pcie p2p transaction terminated due to security, req_id(0x%x)",
RREG32(mmPCIE_WRAP_P2P_REQ_ID));
WREG32(mmPCIE_WRAP_P2P_INTR, 0x1);
@@ -8892,7 +9303,7 @@ static int gaudi2_handle_pcie_p2p_msix(struct hl_device *hdev, u16 event_type)
if (msix_gw_intr) {
gaudi2_print_event(hdev, event_type, true,
- "pcie msi-x gen denied due to vector num check failure, vec(0x%X)\n",
+ "pcie msi-x gen denied due to vector num check failure, vec(0x%X)",
RREG32(mmPCIE_WRAP_MSIX_GW_VEC));
WREG32(mmPCIE_WRAP_MSIX_GW_INTR, 0x1);
@@ -8954,7 +9365,7 @@ static void gaudi2_print_cpu_pkt_failure_info(struct hl_device *hdev, u16 event_
struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ];
gaudi2_print_event(hdev, event_type, false,
- "FW reported sanity check failure, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n",
+ "FW reported sanity check failure, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d",
le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci));
}
@@ -8974,11 +9385,11 @@ static int hl_arc_event_handle(struct hl_device *hdev, u16 event_type,
q = (struct hl_engine_arc_dccm_queue_full_irq *) &payload;
gaudi2_print_event(hdev, event_type, true,
- "ARC DCCM Full event: EngId: %u, Intr_type: %u, Qidx: %u\n",
+ "ARC DCCM Full event: EngId: %u, Intr_type: %u, Qidx: %u",
engine_id, intr_type, q->queue_index);
return 1;
default:
- gaudi2_print_event(hdev, event_type, true, "Unknown ARC event type\n");
+ gaudi2_print_event(hdev, event_type, true, "Unknown ARC event type");
return 0;
}
}
@@ -8987,7 +9398,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
bool reset_required = false, is_critical = false;
- u32 index, ctl, reset_flags = HL_DRV_RESET_HARD, error_count = 0;
+ u32 index, ctl, reset_flags = 0, error_count = 0;
u64 event_mask = 0;
u16 event_type;
@@ -9153,9 +9564,15 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
- case GAUDI2_EVENT_HDMA2_CORE ... GAUDI2_EVENT_PDMA1_CORE:
- error_count = gaudi2_handle_dma_core_event(hdev, event_type,
- le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ case GAUDI2_EVENT_HDMA2_CORE ... GAUDI2_EVENT_HDMA5_CORE:
+ index = event_type - GAUDI2_EVENT_HDMA2_CORE;
+ error_count = gaudi2_handle_edma_core_event(hdev, event_type, index);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
+ break;
+
+ case GAUDI2_EVENT_PDMA0_CORE ... GAUDI2_EVENT_PDMA1_CORE:
+ index = event_type - GAUDI2_EVENT_PDMA0_CORE;
+ error_count = gaudi2_handle_pdma_core_event(hdev, event_type, index);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
@@ -9381,7 +9798,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
break;
case GAUDI2_EVENT_CPU_FP32_NOT_SUPPORTED:
- case GAUDI2_EVENT_DEV_RESET_REQ:
+ case GAUDI2_EVENT_CPU_DEV_RESET_REQ:
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
error_count = GAUDI2_NA_EVENT_CAUSE;
is_critical = true;
@@ -9403,12 +9820,18 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
gaudi2_print_event(hdev, event_type, true, "%d", event_type);
else if (error_count == 0)
gaudi2_print_event(hdev, event_type, true,
- "No error cause for H/W event %u\n", event_type);
+ "No error cause for H/W event %u", event_type);
- if ((gaudi2_irq_map_table[event_type].reset || reset_required) &&
- (hdev->hard_reset_on_fw_events ||
- (hdev->asic_prop.fw_security_enabled && is_critical)))
- goto reset_device;
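+	/* The hard-reset flag is set only for events marked as hard reset or when reset_required was raised */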
+ if ((gaudi2_irq_map_table[event_type].reset != EVENT_RESET_TYPE_NONE) ||
+ reset_required) {
+ if (reset_required ||
+ (gaudi2_irq_map_table[event_type].reset == EVENT_RESET_TYPE_HARD))
+ reset_flags |= HL_DRV_RESET_HARD;
+
+ if (hdev->hard_reset_on_fw_events ||
+ (hdev->asic_prop.fw_security_enabled && is_critical))
+ goto reset_device;
+ }
/* Send unmask irq only for interrupts not classified as MSG */
if (!gaudi2_irq_map_table[event_type].msg)
@@ -9426,6 +9849,10 @@ reset_device:
} else {
reset_flags |= HL_DRV_RESET_DELAY;
}
+ /* escalate general hw errors to critical/fatal error */
+ if (event_mask & HL_NOTIFIER_EVENT_GENERAL_HW_ERR)
+ hl_handle_critical_hw_err(hdev, event_type, &event_mask);
+
event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
hl_device_cond_reset(hdev, reset_flags, event_mask);
}
@@ -9832,16 +10259,23 @@ static int gaudi2_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size, v
/* Create mapping on asic side */
mutex_lock(&hdev->mmu_lock);
+
rc = hl_mmu_map_contiguous(ctx, reserved_va_base, host_mem_dma_addr, SZ_2M);
- hl_mmu_invalidate_cache_range(hdev, false,
+ if (rc) {
+ dev_err(hdev->dev, "Failed to create mapping on asic mmu\n");
+ goto unreserve_va;
+ }
+
+ rc = hl_mmu_invalidate_cache_range(hdev, false,
MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV,
ctx->asid, reserved_va_base, SZ_2M);
- mutex_unlock(&hdev->mmu_lock);
if (rc) {
- dev_err(hdev->dev, "Failed to create mapping on asic mmu\n");
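+		/* Undo the mapping so the error path does not leak it */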
+ hl_mmu_unmap_contiguous(ctx, reserved_va_base, SZ_2M);
goto unreserve_va;
}
+ mutex_unlock(&hdev->mmu_lock);
+
/* Enable MMU on KDMA */
gaudi2_kdma_set_mmbp_asid(hdev, false, ctx->asid);
@@ -9870,11 +10304,16 @@ static int gaudi2_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size, v
gaudi2_kdma_set_mmbp_asid(hdev, true, HL_KERNEL_ASID_ID);
mutex_lock(&hdev->mmu_lock);
- hl_mmu_unmap_contiguous(ctx, reserved_va_base, SZ_2M);
- hl_mmu_invalidate_cache_range(hdev, false, MMU_OP_USERPTR,
+
+ rc = hl_mmu_unmap_contiguous(ctx, reserved_va_base, SZ_2M);
+ if (rc)
+ goto unreserve_va;
+
+ rc = hl_mmu_invalidate_cache_range(hdev, false, MMU_OP_USERPTR,
ctx->asid, reserved_va_base, SZ_2M);
- mutex_unlock(&hdev->mmu_lock);
+
unreserve_va:
+ mutex_unlock(&hdev->mmu_lock);
hl_unreserve_va_block(hdev, ctx, reserved_va_base, SZ_2M);
free_data_buffer:
hl_asic_dma_free_coherent(hdev, SZ_2M, host_mem_virtual_addr, host_mem_dma_addr);
@@ -9927,17 +10366,24 @@ static int gaudi2_internal_cb_pool_init(struct hl_device *hdev, struct hl_ctx *c
}
mutex_lock(&hdev->mmu_lock);
+
rc = hl_mmu_map_contiguous(ctx, hdev->internal_cb_va_base, hdev->internal_cb_pool_dma_addr,
HOST_SPACE_INTERNAL_CB_SZ);
- hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR);
- mutex_unlock(&hdev->mmu_lock);
-
if (rc)
goto unreserve_internal_cb_pool;
+ rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR);
+ if (rc)
+ goto unmap_internal_cb_pool;
+
+ mutex_unlock(&hdev->mmu_lock);
+
return 0;
+unmap_internal_cb_pool:
+ hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base, HOST_SPACE_INTERNAL_CB_SZ);
unreserve_internal_cb_pool:
+ mutex_unlock(&hdev->mmu_lock);
hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base, HOST_SPACE_INTERNAL_CB_SZ);
destroy_internal_cb_pool:
gen_pool_destroy(hdev->internal_cb_pool);
@@ -10724,6 +11170,7 @@ static const struct hl_asic_funcs gaudi2_funcs = {
.access_dev_mem = hl_access_dev_mem,
.set_dram_bar_base = gaudi2_set_hbm_bar_base,
.set_engine_cores = gaudi2_set_engine_cores,
+ .set_engines = gaudi2_set_engines,
.send_device_activity = gaudi2_send_device_activity,
.set_dram_properties = gaudi2_set_dram_properties,
.set_binning_masks = gaudi2_set_binning_masks,
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2P.h b/drivers/accel/habanalabs/gaudi2/gaudi2P.h
index 2687404d9d21..0742046810f9 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2P.h
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2P.h
@@ -387,6 +387,8 @@ enum gaudi2_edma_id {
* We have 64 CQ's per dcore, CQ0 in dcore 0 is reserved for legacy mode
*/
#define GAUDI2_NUM_USER_INTERRUPTS 255
+#define GAUDI2_NUM_RESERVED_INTERRUPTS 1
+#define GAUDI2_TOTAL_USER_INTERRUPTS (GAUDI2_NUM_USER_INTERRUPTS + GAUDI2_NUM_RESERVED_INTERRUPTS)
enum gaudi2_irq_num {
GAUDI2_IRQ_NUM_EVENT_QUEUE = GAUDI2_EVENT_QUEUE_MSIX_IDX,
@@ -410,12 +412,15 @@ enum gaudi2_irq_num {
GAUDI2_IRQ_NUM_SHARED_DEC0_ABNRM,
GAUDI2_IRQ_NUM_SHARED_DEC1_NRM,
GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM,
+ GAUDI2_IRQ_NUM_DEC_LAST = GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM,
GAUDI2_IRQ_NUM_COMPLETION,
GAUDI2_IRQ_NUM_NIC_PORT_FIRST,
GAUDI2_IRQ_NUM_NIC_PORT_LAST = (GAUDI2_IRQ_NUM_NIC_PORT_FIRST + NIC_NUMBER_OF_PORTS - 1),
+ GAUDI2_IRQ_NUM_TPC_ASSERT,
GAUDI2_IRQ_NUM_RESERVED_FIRST,
- GAUDI2_IRQ_NUM_RESERVED_LAST = (GAUDI2_MSIX_ENTRIES - GAUDI2_NUM_USER_INTERRUPTS - 1),
- GAUDI2_IRQ_NUM_USER_FIRST,
+ GAUDI2_IRQ_NUM_RESERVED_LAST = (GAUDI2_MSIX_ENTRIES - GAUDI2_TOTAL_USER_INTERRUPTS - 1),
+ GAUDI2_IRQ_NUM_UNEXPECTED_ERROR = RESERVED_MSIX_UNEXPECTED_USER_ERROR_INTERRUPT,
+ GAUDI2_IRQ_NUM_USER_FIRST = GAUDI2_IRQ_NUM_UNEXPECTED_ERROR + 1,
GAUDI2_IRQ_NUM_USER_LAST = (GAUDI2_IRQ_NUM_USER_FIRST + GAUDI2_NUM_USER_INTERRUPTS - 1),
GAUDI2_IRQ_NUM_LAST = (GAUDI2_MSIX_ENTRIES - 1)
};
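
With GAUDI2_MSIX_ENTRIES equal to 512 and RESERVED_MSIX_UNEXPECTED_USER_ERROR_INTERRUPT equal to 256 (both visible in the gaudi2.h hunk later in this patch), the new defines pin the tail of the MSI-X vector space as follows; this is only the arithmetic spelled out, not code from the patch:

/*
 * Index arithmetic implied by the new defines:
 *
 *   GAUDI2_TOTAL_USER_INTERRUPTS    = 255 + 1       = 256
 *   GAUDI2_IRQ_NUM_RESERVED_LAST    = 512 - 256 - 1 = 255
 *   GAUDI2_IRQ_NUM_UNEXPECTED_ERROR = 256             (the single reserved vector)
 *   GAUDI2_IRQ_NUM_USER_FIRST       = 256 + 1       = 257
 *   GAUDI2_IRQ_NUM_USER_LAST        = 257 + 255 - 1 = 511 = GAUDI2_MSIX_ENTRIES - 1
 */
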
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
index 1dfbe293ecec..25b5368f37dd 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
@@ -2657,7 +2657,7 @@ int gaudi2_coresight_init(struct hl_device *hdev)
/*
* Mask out all the disabled binned offsets.
* so when user request to configure a binned or masked out component,
- * driver will ignore programing it ( happens when offset value is set to 0x0 )
+ * driver will ignore programming it ( happens when offset value is set to 0x0 )
* this is being set in gaudi2_coresight_set_disabled_components
*/
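
The convention the comment describes (a zeroed offset marks a binned or masked-out component, so the driver silently skips programming it) boils down to a guard of the following shape. The helper and the register-write call here are illustrative assumptions, not code from gaudi2_coresight.c:

/* hypothetical sketch of the "offset == 0 means binned out" convention */
static void gaudi2_cfg_component(struct hl_device *hdev, u32 offset, u32 val)
{
	if (!offset)
		return;		/* component binned/masked out, ignore the request */

	WREG32(offset, val);	/* assumption: a plain register write is sufficient here */
}
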
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2_masks.h b/drivers/accel/habanalabs/gaudi2/gaudi2_masks.h
index e9ac87828221..e6664c4a2cf5 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2_masks.h
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2_masks.h
@@ -79,7 +79,6 @@
DCORE0_MME_CTRL_LO_ARCH_STATUS_QM_RDY_MASK)
#define TPC_IDLE_MASK (DCORE0_TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_MASK | \
- DCORE0_TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK | \
DCORE0_TPC0_CFG_STATUS_IQ_EMPTY_MASK | \
DCORE0_TPC0_CFG_STATUS_SB_EMPTY_MASK | \
DCORE0_TPC0_CFG_STATUS_QM_IDLE_MASK | \
@@ -87,6 +86,8 @@
#define DCORE0_TPC0_QM_CGM_STS_AGENT_IDLE_MASK 0x100
+#define DCORE0_TPC0_EML_CFG_DBG_CNT_DBG_EXIT_MASK 0x40
+
/* CGM_IDLE_MASK is valid for all engines CGM idle check */
#define CGM_IDLE_MASK DCORE0_TPC0_QM_CGM_STS_AGENT_IDLE_MASK
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2_security.c b/drivers/accel/habanalabs/gaudi2/gaudi2_security.c
index a212f82e6604..694735f9e6e6 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2_security.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2_security.c
@@ -1595,6 +1595,7 @@ static const u32 gaudi2_pb_dcr0_tpc0_unsecured_regs[] = {
mmDCORE0_TPC0_CFG_KERNEL_SRF_30,
mmDCORE0_TPC0_CFG_KERNEL_SRF_31,
mmDCORE0_TPC0_CFG_TPC_SB_L0CD,
+ mmDCORE0_TPC0_CFG_TPC_ID,
mmDCORE0_TPC0_CFG_QM_KERNEL_ID_INC,
mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_0,
mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_1,
diff --git a/drivers/accel/habanalabs/goya/goya.c b/drivers/accel/habanalabs/goya/goya.c
index df65e9bdc18a..07d67878eac5 100644
--- a/drivers/accel/habanalabs/goya/goya.c
+++ b/drivers/accel/habanalabs/goya/goya.c
@@ -472,6 +472,7 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->max_pending_cs = GOYA_MAX_PENDING_CS;
prop->first_available_user_interrupt = USHRT_MAX;
+ prop->tpc_interrupt_id = USHRT_MAX;
for (i = 0 ; i < HL_MAX_DCORES ; i++)
prop->first_available_cq[i] = USHRT_MAX;
@@ -668,13 +669,18 @@ pci_init:
rc = hl_fw_read_preboot_status(hdev);
if (rc) {
if (hdev->reset_on_preboot_fail)
+			/* we are already on a failure path, so don't check whether hw_fini fails. */
hdev->asic_funcs->hw_fini(hdev, true, false);
goto pci_fini;
}
if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
dev_dbg(hdev->dev, "H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true, false);
+ rc = hdev->asic_funcs->hw_fini(hdev, true, false);
+ if (rc) {
+ dev_err(hdev->dev, "failed to reset HW in dirty state (%d)\n", rc);
+ goto pci_fini;
+ }
}
if (!hdev->pldm) {
@@ -2782,7 +2788,7 @@ disable_queues:
return rc;
}
-static void goya_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
+static int goya_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
struct goya_device *goya = hdev->asic_specific;
u32 reset_timeout_ms, cpu_timeout_ms, status;
@@ -2828,17 +2834,17 @@ static void goya_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
msleep(reset_timeout_ms);
status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
- if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
- dev_err(hdev->dev,
- "Timeout while waiting for device to reset 0x%x\n",
- status);
+ if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK) {
+ dev_err(hdev->dev, "Timeout while waiting for device to reset 0x%x\n", status);
+ return -ETIMEDOUT;
+ }
if (!hard_reset && goya) {
goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME |
HW_CAP_GOLDEN | HW_CAP_TPC);
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
GOYA_ASYNC_EVENT_ID_SOFT_RESET);
- return;
+ return 0;
}
/* Chicken bit to re-initiate boot sequencer flow */
@@ -2857,6 +2863,7 @@ static void goya_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
memset(goya->events_stat, 0, sizeof(goya->events_stat));
}
+ return 0;
}
int goya_suspend(struct hl_device *hdev)
diff --git a/drivers/accel/habanalabs/include/common/cpucp_if.h b/drivers/accel/habanalabs/include/common/cpucp_if.h
index d713252a4f13..8bbe685458c4 100644
--- a/drivers/accel/habanalabs/include/common/cpucp_if.h
+++ b/drivers/accel/habanalabs/include/common/cpucp_if.h
@@ -357,6 +357,7 @@ struct hl_eq_addr_dec_intr_data {
struct hl_eq_entry {
struct hl_eq_header hdr;
union {
+ __le64 data_placeholder;
struct hl_eq_ecc_data ecc_data;
struct hl_eq_hbm_ecc_data hbm_ecc_data; /* Gaudi1 HBM */
struct hl_eq_sm_sei_data sm_sei_data;
@@ -661,6 +662,9 @@ enum pq_init_status {
* CPUCP_PACKET_ACTIVE_STATUS_SET -
* LKD sends FW indication whether device is free or in use, this indication is reported
* also to the BMC.
+ *
+ * CPUCP_PACKET_REGISTER_INTERRUPTS -
+ * Packet to register interrupts indicating LKD is ready to receive events from FW.
*/
enum cpucp_packet_id {
@@ -725,6 +729,8 @@ enum cpucp_packet_id {
CPUCP_PACKET_RESERVED9, /* not used */
CPUCP_PACKET_RESERVED10, /* not used */
CPUCP_PACKET_RESERVED11, /* not used */
+ CPUCP_PACKET_RESERVED12, /* internal */
+ CPUCP_PACKET_REGISTER_INTERRUPTS, /* internal */
CPUCP_PACKET_ID_MAX /* must be last */
};
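
The new packet type is documented above as the LKD telling FW that it is ready to receive events. A rough sketch of how such a packet would typically be issued over the existing CPU-CP message path follows; the function name, call site and timeout value are assumptions for illustration, not taken from this patch.

/*
 * Hypothetical sketch: send CPUCP_PACKET_REGISTER_INTERRUPTS once the
 * driver's interrupt handlers are in place. The opcode/ctl encoding follows
 * the driver's existing CPU-CP packet convention.
 */
static int register_interrupts_with_fw(struct hl_device *hdev)
{
	struct cpucp_packet pkt = {};
	u64 result;

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_REGISTER_INTERRUPTS <<
			      CPUCP_PKT_CTL_OPCODE_SHIFT);

	return hdev->asic_funcs->send_cpu_message(hdev, (u32 *)&pkt, sizeof(pkt),
						  0, &result);
}
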
@@ -1127,6 +1133,7 @@ struct cpucp_security_info {
* (0 = functional 1 = binned)
* @interposer_version: Interposer version programmed in eFuse
* @substrate_version: Substrate version programmed in eFuse
+ * @fw_hbm_region_size: Size in bytes of FW reserved region in HBM.
* @fw_os_version: Firmware OS Version
*/
struct cpucp_info {
@@ -1154,7 +1161,7 @@ struct cpucp_info {
__u8 substrate_version;
__u8 reserved2;
struct cpucp_security_info sec_info;
- __le32 reserved3;
+ __le32 fw_hbm_region_size;
__u8 pll_map[PLL_MAP_LEN];
__le64 mme_binning_mask;
__u8 fw_os_version[VERSION_MAX_LEN];
diff --git a/drivers/accel/habanalabs/include/common/hl_boot_if.h b/drivers/accel/habanalabs/include/common/hl_boot_if.h
index 2256add057c5..c58d76a2705c 100644
--- a/drivers/accel/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/accel/habanalabs/include/common/hl_boot_if.h
@@ -770,15 +770,23 @@ enum hl_components {
HL_COMPONENTS_ARMCP,
HL_COMPONENTS_CPLD,
HL_COMPONENTS_UBOOT,
+ HL_COMPONENTS_FUSE,
HL_COMPONENTS_MAX_NUM = 16
};
+#define NAME_MAX_LEN 32 /* bytes */
+struct hl_module_data {
+ __u8 name[NAME_MAX_LEN];
+ __u8 version[VERSION_MAX_LEN];
+};
+
/**
* struct hl_component_versions - versions associated with hl component.
* @struct_size: size of all the struct (including dynamic size of modules).
* @modules_offset: offset of the modules field in this struct.
* @component: version of the component itself.
* @fw_os: Firmware OS Version.
+ * @comp_name: Name of the component.
* @modules_mask: i'th bit (from LSB) is a flag - on if module i in enum
* hl_modules is used.
* @modules_counter: number of set bits in modules_mask.
@@ -791,45 +799,14 @@ struct hl_component_versions {
__le16 modules_offset;
__u8 component[VERSION_MAX_LEN];
__u8 fw_os[VERSION_MAX_LEN];
+ __u8 comp_name[NAME_MAX_LEN];
__le16 modules_mask;
__u8 modules_counter;
__u8 reserved[1];
- __u8 modules[][VERSION_MAX_LEN];
-};
-
-/**
- * struct hl_fw_versions - all versions (fuse, cpucp's components with their
- * modules)
- * @struct_size: size of all the struct (including dynamic size of components).
- * @components_offset: offset of the components field in this struct.
- * @fuse: silicon production FUSE information.
- * @components_mask: i'th bit (from LSB) is a flag - on if component i in enum
- * hl_components is used.
- * @components_counter: number of set bits in components_mask.
- * @reserved: reserved for future use.
- * @components: versions of hl components. Index i corresponds to the i'th bit
- * that is *on* in components_mask. For example, if
- * components_mask=0b101, then *components represents arcpid and
- * *(hl_component_versions*)((char*)components + 1') represents
- * preboot, where 1' = components[0].struct_size.
- */
-struct hl_fw_versions {
- __le16 struct_size;
- __le16 components_offset;
- __u8 fuse[VERSION_MAX_LEN];
- __le16 components_mask;
- __u8 components_counter;
- __u8 reserved[1];
- struct hl_component_versions components[];
+ struct hl_module_data modules[];
};
-/* Max size of struct hl_component_versions */
-#define HL_COMPONENT_VERSIONS_MAX_SIZE \
- (sizeof(struct hl_component_versions) + HL_MODULES_MAX_NUM * \
- VERSION_MAX_LEN)
-
-/* Max size of struct hl_fw_versions */
-#define HL_FW_VERSIONS_MAX_SIZE (sizeof(struct hl_fw_versions) + \
- HL_COMPONENTS_MAX_NUM * HL_COMPONENT_VERSIONS_MAX_SIZE)
+/* Max size of the firmware versions FIT */
+#define HL_FW_VERSIONS_FIT_SIZE 4096
#endif /* HL_BOOT_IF_H */
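
The reshaped struct hl_component_versions now carries a flexible array of struct hl_module_data (a name/version pair per module) instead of bare version strings, with modules_counter giving the number of valid entries and the whole reply bounded by HL_FW_VERSIONS_FIT_SIZE. A small sketch of walking such a reply, assuming the buffer has already been fetched from FW (the fetch path itself is not part of this hunk):

/*
 * Sketch only: iterate the per-module name/version pairs in a
 * struct hl_component_versions reply obtained from FW.
 */
static void print_component_versions(struct hl_device *hdev,
				     const struct hl_component_versions *comp)
{
	int i;

	dev_info(hdev->dev, "%s: %s (fw_os %s)\n",
		 comp->comp_name, comp->component, comp->fw_os);

	for (i = 0 ; i < comp->modules_counter ; i++)
		dev_info(hdev->dev, "  module %s: %s\n",
			 comp->modules[i].name, comp->modules[i].version);
}
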
diff --git a/drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
index 0bf3092bfeea..452b379f39f6 100644
--- a/drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
@@ -164,6 +164,8 @@
#define mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR 0x4800040
+#define mmDCORE0_TPC0_EML_CFG_DBG_CNT 0x40000
+
#define SM_OBJS_PROT_BITS_OFFS 0x14000
#define DCORE_OFFSET (mmDCORE1_TPC0_QM_BASE - mmDCORE0_TPC0_QM_BASE)
@@ -185,7 +187,10 @@
#define TPC_CFG_STALL_ON_ERR_OFFSET (mmDCORE0_TPC0_CFG_STALL_ON_ERR - mmDCORE0_TPC0_CFG_BASE)
#define TPC_CFG_TPC_INTR_MASK_OFFSET (mmDCORE0_TPC0_CFG_TPC_INTR_MASK - mmDCORE0_TPC0_CFG_BASE)
#define TPC_CFG_MSS_CONFIG_OFFSET (mmDCORE0_TPC0_CFG_MSS_CONFIG - mmDCORE0_TPC0_CFG_BASE)
+#define TPC_EML_CFG_DBG_CNT_OFFSET (mmDCORE0_TPC0_EML_CFG_DBG_CNT - mmDCORE0_TPC0_EML_CFG_BASE)
+#define EDMA_CORE_CFG_STALL_OFFSET (mmDCORE0_EDMA0_CORE_CFG_1 - mmDCORE0_EDMA0_CORE_BASE)
+#define MME_CTRL_LO_QM_STALL_OFFSET (mmDCORE0_MME_CTRL_LO_QM_STALL - mmDCORE0_MME_CTRL_LO_BASE)
#define MME_ACC_INTR_MASK_OFFSET (mmDCORE0_MME_ACC_INTR_MASK - mmDCORE0_MME_ACC_BASE)
#define MME_ACC_WR_AXI_AGG_COUT0_OFFSET (mmDCORE0_MME_ACC_WR_AXI_AGG_COUT0 - mmDCORE0_MME_ACC_BASE)
#define MME_ACC_WR_AXI_AGG_COUT1_OFFSET (mmDCORE0_MME_ACC_WR_AXI_AGG_COUT1 - mmDCORE0_MME_ACC_BASE)
diff --git a/drivers/accel/habanalabs/include/gaudi2/gaudi2.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2.h
index 5b4f9e108798..0231d6c55b4a 100644
--- a/drivers/accel/habanalabs/include/gaudi2/gaudi2.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2.h
@@ -63,6 +63,8 @@
#define RESERVED_VA_RANGE_FOR_ARC_ON_HOST_HPAGE_START 0xFFF0F80000000000ull
#define RESERVED_VA_RANGE_FOR_ARC_ON_HOST_HPAGE_END 0xFFF0FFFFFFFFFFFFull
+#define RESERVED_MSIX_UNEXPECTED_USER_ERROR_INTERRUPT 256
+
#define GAUDI2_MSIX_ENTRIES 512
#define QMAN_PQ_ENTRY_SIZE 16 /* Bytes */
diff --git a/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_events.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_events.h
index 50852cc80373..f661068d0c5f 100644
--- a/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_events.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_events.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2018-2021 HabanaLabs, Ltd.
+ * Copyright 2018-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -958,7 +958,7 @@ enum gaudi2_async_event_id {
GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG1 = 1318,
GAUDI2_EVENT_ARC_DCCM_FULL = 1319,
GAUDI2_EVENT_CPU_FP32_NOT_SUPPORTED = 1320,
- GAUDI2_EVENT_DEV_RESET_REQ = 1321,
+ GAUDI2_EVENT_CPU_DEV_RESET_REQ = 1321,
GAUDI2_EVENT_SIZE,
};
diff --git a/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h
index 82be01bea98e..ad01fc4e9940 100644
--- a/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h
@@ -10,6 +10,12 @@
** DO NOT EDIT BELOW **
************************************/
+enum event_reset_type {
+ EVENT_RESET_TYPE_NONE,
+ EVENT_RESET_TYPE_COMPUTE,
+ EVENT_RESET_TYPE_HARD,
+};
+
#ifndef __GAUDI2_ASYNC_IDS_MAP_EVENTS_EXT_H_
#define __GAUDI2_ASYNC_IDS_MAP_EVENTS_EXT_H_
@@ -23,2650 +29,2650 @@ struct gaudi2_async_events_ids_map {
};
static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
- { .fc_id = 0, .cpu_id = 0, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1, .cpu_id = 1, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 2, .cpu_id = 2, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 3, .cpu_id = 3, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 4, .cpu_id = 4, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 5, .cpu_id = 5, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 6, .cpu_id = 6, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 7, .cpu_id = 7, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 8, .cpu_id = 8, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 9, .cpu_id = 9, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 10, .cpu_id = 10, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 11, .cpu_id = 11, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 12, .cpu_id = 12, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 13, .cpu_id = 13, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 14, .cpu_id = 14, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 15, .cpu_id = 15, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 16, .cpu_id = 16, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 17, .cpu_id = 17, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 18, .cpu_id = 18, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 19, .cpu_id = 19, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 20, .cpu_id = 20, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 21, .cpu_id = 21, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 22, .cpu_id = 22, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 23, .cpu_id = 23, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 24, .cpu_id = 24, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 25, .cpu_id = 25, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 26, .cpu_id = 26, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 27, .cpu_id = 27, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 28, .cpu_id = 28, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 29, .cpu_id = 29, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 30, .cpu_id = 30, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 31, .cpu_id = 31, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 32, .cpu_id = 32, .valid = 1,
- .msg = 0, .reset = 0, .name = "PCIE_CORE_SERR" },
- { .fc_id = 33, .cpu_id = 33, .valid = 1,
- .msg = 0, .reset = 1, .name = "PCIE_CORE_DERR" },
- { .fc_id = 34, .cpu_id = 34, .valid = 1,
- .msg = 0, .reset = 0, .name = "PCIE_IF_SERR" },
- { .fc_id = 35, .cpu_id = 35, .valid = 1,
- .msg = 0, .reset = 1, .name = "PCIE_IF_DERR" },
- { .fc_id = 36, .cpu_id = 36, .valid = 1,
- .msg = 0, .reset = 0, .name = "PCIE_PHY_SERR" },
- { .fc_id = 37, .cpu_id = 37, .valid = 1,
- .msg = 0, .reset = 1, .name = "PCIE_PHY_DERR" },
- { .fc_id = 38, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC0_ECC_SERR" },
- { .fc_id = 39, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC1_ECC_SERR" },
- { .fc_id = 40, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC2_ECC_SERR" },
- { .fc_id = 41, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC3_ECC_SERR" },
- { .fc_id = 42, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC4_ECC_SERR" },
- { .fc_id = 43, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC5_ECC_SERR" },
- { .fc_id = 44, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC6_ECC_SERR" },
- { .fc_id = 45, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC7_ECC_SERR" },
- { .fc_id = 46, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC8_ECC_SERR" },
- { .fc_id = 47, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC9_ECC_SERR" },
- { .fc_id = 48, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC10_ECC_SERR" },
- { .fc_id = 49, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC11_ECC_SERR" },
- { .fc_id = 50, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC12_ECC_SERR" },
- { .fc_id = 51, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC13_ECC_SERR" },
- { .fc_id = 52, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC14_ECC_SERR" },
- { .fc_id = 53, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC15_ECC_SERR" },
- { .fc_id = 54, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC16_ECC_SERR" },
- { .fc_id = 55, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC17_ECC_SERR" },
- { .fc_id = 56, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC18_ECC_SERR" },
- { .fc_id = 57, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC19_ECC_SERR" },
- { .fc_id = 58, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC20_ECC_SERR" },
- { .fc_id = 59, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC21_ECC_SERR" },
- { .fc_id = 60, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC22_ECC_SERR" },
- { .fc_id = 61, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC23_ECC_SERR" },
- { .fc_id = 62, .cpu_id = 38, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC24_ECC_SERR" },
- { .fc_id = 63, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC0_ECC_DERR" },
- { .fc_id = 64, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC1_ECC_DERR" },
- { .fc_id = 65, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC2_ECC_DERR" },
- { .fc_id = 66, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC3_ECC_DERR" },
- { .fc_id = 67, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC4_ECC_DERR" },
- { .fc_id = 68, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC5_ECC_DERR" },
- { .fc_id = 69, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC6_ECC_DERR" },
- { .fc_id = 70, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC7_ECC_DERR" },
- { .fc_id = 71, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC8_ECC_DERR" },
- { .fc_id = 72, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC9_ECC_DERR" },
- { .fc_id = 73, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC10_ECC_DERR" },
- { .fc_id = 74, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC11_ECC_DERR" },
- { .fc_id = 75, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC12_ECC_DERR" },
- { .fc_id = 76, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC13_ECC_DERR" },
- { .fc_id = 77, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC14_ECC_DERR" },
- { .fc_id = 78, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC15_ECC_DERR" },
- { .fc_id = 79, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC16_ECC_DERR" },
- { .fc_id = 80, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC17_ECC_DERR" },
- { .fc_id = 81, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC18_ECC_DERR" },
- { .fc_id = 82, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC19_ECC_DERR" },
- { .fc_id = 83, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC20_ECC_DERR" },
- { .fc_id = 84, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC21_ECC_DERR" },
- { .fc_id = 85, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC22_ECC_DERR" },
- { .fc_id = 86, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC23_ECC_DERR" },
- { .fc_id = 87, .cpu_id = 39, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC24_ECC_DERR" },
- { .fc_id = 88, .cpu_id = 40, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME0_SBTE0_ECC_SERR" },
- { .fc_id = 89, .cpu_id = 40, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME0_SBTE1_ECC_SERR" },
- { .fc_id = 90, .cpu_id = 40, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME0_SBTE2_ECC_SERR" },
- { .fc_id = 91, .cpu_id = 40, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME0_SBTE3_ECC_SERR" },
- { .fc_id = 92, .cpu_id = 40, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME0_SBTE4_ECC_SERR" },
- { .fc_id = 93, .cpu_id = 40, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME0_CTRL_ECC_SERR" },
- { .fc_id = 94, .cpu_id = 40, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME0_WAP_ECC_SERR" },
- { .fc_id = 95, .cpu_id = 41, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME1_SBTE0_ECC_SERR" },
- { .fc_id = 96, .cpu_id = 41, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME1_SBTE1_ECC_SERR" },
- { .fc_id = 97, .cpu_id = 41, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME1_SBTE2_ECC_SERR" },
- { .fc_id = 98, .cpu_id = 41, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME1_SBTE3_ECC_SERR" },
- { .fc_id = 99, .cpu_id = 41, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME1_SBTE4_ECC_SERR" },
- { .fc_id = 100, .cpu_id = 41, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME1_CTRL_ECC_SERR" },
- { .fc_id = 101, .cpu_id = 41, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME1_WAP_ECC_SERR" },
- { .fc_id = 102, .cpu_id = 42, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME2_SBTE0_ECC_SERR" },
- { .fc_id = 103, .cpu_id = 42, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME2_SBTE1_ECC_SERR" },
- { .fc_id = 104, .cpu_id = 42, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME2_SBTE2_ECC_SERR" },
- { .fc_id = 105, .cpu_id = 42, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME2_SBTE3_ECC_SERR" },
- { .fc_id = 106, .cpu_id = 42, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME2_SBTE4_ECC_SERR" },
- { .fc_id = 107, .cpu_id = 42, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME2_CTRL_ECC_SERR" },
- { .fc_id = 108, .cpu_id = 42, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME2_WAP_ECC_SERR" },
- { .fc_id = 109, .cpu_id = 43, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME3_SBTE0_ECC_SERR" },
- { .fc_id = 110, .cpu_id = 43, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME3_SBTE1_ECC_SERR" },
- { .fc_id = 111, .cpu_id = 43, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME3_SBTE2_ECC_SERR" },
- { .fc_id = 112, .cpu_id = 43, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME3_SBTE3_ECC_SERR" },
- { .fc_id = 113, .cpu_id = 43, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME3_SBTE4_ECC_SERR" },
- { .fc_id = 114, .cpu_id = 43, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME3_CTRL_ECC_SERR" },
- { .fc_id = 115, .cpu_id = 43, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME3_WAP_ECC_SERR" },
- { .fc_id = 116, .cpu_id = 44, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME0_SBTE0_ECC_DERR" },
- { .fc_id = 117, .cpu_id = 44, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME0_SBTE1_ECC_DERR" },
- { .fc_id = 118, .cpu_id = 44, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME0_SBTE2_ECC_DERR" },
- { .fc_id = 119, .cpu_id = 44, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME0_SBTE3_ECC_DERR" },
- { .fc_id = 120, .cpu_id = 44, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME0_SBTE4_ECC_DERR" },
- { .fc_id = 121, .cpu_id = 44, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME0_CTRL_ECC_DERR" },
- { .fc_id = 122, .cpu_id = 44, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME0_WAP_ECC_DERR" },
- { .fc_id = 123, .cpu_id = 45, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME1_SBTE0_ECC_DERR" },
- { .fc_id = 124, .cpu_id = 45, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME1_SBTE1_ECC_DERR" },
- { .fc_id = 125, .cpu_id = 45, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME1_SBTE2_ECC_DERR" },
- { .fc_id = 126, .cpu_id = 45, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME1_SBTE3_ECC_DERR" },
- { .fc_id = 127, .cpu_id = 45, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME1_SBTE4_ECC_DERR" },
- { .fc_id = 128, .cpu_id = 45, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME1_CTRL_ECC_DERR" },
- { .fc_id = 129, .cpu_id = 45, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME1_WAP_ECC_DERR" },
- { .fc_id = 130, .cpu_id = 46, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME2_SBTE0_ECC_DERR" },
- { .fc_id = 131, .cpu_id = 46, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME2_SBTE1_ECC_DERR" },
- { .fc_id = 132, .cpu_id = 46, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME2_SBTE2_ECC_DERR" },
- { .fc_id = 133, .cpu_id = 46, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME2_SBTE3_ECC_DERR" },
- { .fc_id = 134, .cpu_id = 46, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME2_SBTE4_ECC_DERR" },
- { .fc_id = 135, .cpu_id = 46, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME2_CTRL_ECC_DERR" },
- { .fc_id = 136, .cpu_id = 46, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME2_WAP_ECC_DERR" },
- { .fc_id = 137, .cpu_id = 47, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME3_SBTE0_ECC_DERR" },
- { .fc_id = 138, .cpu_id = 47, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME3_SBTE1_ECC_DERR" },
- { .fc_id = 139, .cpu_id = 47, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME3_SBTE2_ECC_DERR" },
- { .fc_id = 140, .cpu_id = 47, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME3_SBTE3_ECC_DERR" },
- { .fc_id = 141, .cpu_id = 47, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME3_SBTE4_ECC_DERR" },
- { .fc_id = 142, .cpu_id = 47, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME3_CTRL_ECC_DERR" },
- { .fc_id = 143, .cpu_id = 47, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME3_WAP_ECC_DERR" },
- { .fc_id = 144, .cpu_id = 48, .valid = 1,
- .msg = 0, .reset = 0, .name = "HDMA2_ECC_SERR" },
- { .fc_id = 145, .cpu_id = 48, .valid = 1,
- .msg = 0, .reset = 0, .name = "HDMA3_ECC_SERR" },
- { .fc_id = 146, .cpu_id = 48, .valid = 1,
- .msg = 0, .reset = 0, .name = "HDMA0_ECC_SERR" },
- { .fc_id = 147, .cpu_id = 48, .valid = 1,
- .msg = 0, .reset = 0, .name = "HDMA1_ECC_SERR" },
- { .fc_id = 148, .cpu_id = 48, .valid = 1,
- .msg = 0, .reset = 0, .name = "HDMA6_ECC_SERR" },
- { .fc_id = 149, .cpu_id = 48, .valid = 1,
- .msg = 0, .reset = 0, .name = "HDMA7_ECC_SERR" },
- { .fc_id = 150, .cpu_id = 48, .valid = 1,
- .msg = 0, .reset = 0, .name = "HDMA4_ECC_SERR" },
- { .fc_id = 151, .cpu_id = 48, .valid = 1,
- .msg = 0, .reset = 0, .name = "HDMA5_ECC_SERR" },
- { .fc_id = 152, .cpu_id = 49, .valid = 1,
- .msg = 0, .reset = 1, .name = "HDMA2_ECC_DERR" },
- { .fc_id = 153, .cpu_id = 49, .valid = 1,
- .msg = 0, .reset = 1, .name = "HDMA3_ECC_DERR" },
- { .fc_id = 154, .cpu_id = 49, .valid = 1,
- .msg = 0, .reset = 1, .name = "HDMA0_ECC_DERR" },
- { .fc_id = 155, .cpu_id = 49, .valid = 1,
- .msg = 0, .reset = 1, .name = "HDMA1_ECC_DERR" },
- { .fc_id = 156, .cpu_id = 49, .valid = 1,
- .msg = 0, .reset = 1, .name = "HDMA6_ECC_DERR" },
- { .fc_id = 157, .cpu_id = 49, .valid = 1,
- .msg = 0, .reset = 1, .name = "HDMA7_ECC_DERR" },
- { .fc_id = 158, .cpu_id = 49, .valid = 1,
- .msg = 0, .reset = 1, .name = "HDMA4_ECC_DERR" },
- { .fc_id = 159, .cpu_id = 49, .valid = 1,
- .msg = 0, .reset = 1, .name = "HDMA5_ECC_DERR" },
- { .fc_id = 160, .cpu_id = 50, .valid = 1,
- .msg = 0, .reset = 0, .name = "KDMA0_ECC_SERR" },
- { .fc_id = 161, .cpu_id = 51, .valid = 1,
- .msg = 0, .reset = 0, .name = "PDMA0_ECC_SERR" },
- { .fc_id = 162, .cpu_id = 51, .valid = 1,
- .msg = 0, .reset = 0, .name = "PDMA1_ECC_SERR" },
- { .fc_id = 163, .cpu_id = 52, .valid = 1,
- .msg = 0, .reset = 1, .name = "KDMA0_ECC_DERR" },
- { .fc_id = 164, .cpu_id = 53, .valid = 1,
- .msg = 0, .reset = 1, .name = "PDMA0_ECC_DERR" },
- { .fc_id = 165, .cpu_id = 53, .valid = 1,
- .msg = 0, .reset = 1, .name = "PDMA1_ECC_DERR" },
- { .fc_id = 166, .cpu_id = 54, .valid = 1,
- .msg = 0, .reset = 0, .name = "CPU_IF_ECC_SERR" },
- { .fc_id = 167, .cpu_id = 55, .valid = 1,
- .msg = 0, .reset = 1, .name = "CPU_IF_ECC_DERR" },
- { .fc_id = 168, .cpu_id = 56, .valid = 1,
- .msg = 0, .reset = 0, .name = "PSOC_MEM_SERR" },
- { .fc_id = 169, .cpu_id = 57, .valid = 1,
- .msg = 0, .reset = 1, .name = "PSOC_MEM_DERR" },
- { .fc_id = 170, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM0_ECC_SERR" },
- { .fc_id = 171, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM1_ECC_SERR" },
- { .fc_id = 172, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM2_ECC_SERR" },
- { .fc_id = 173, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM3_ECC_SERR" },
- { .fc_id = 174, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM4_ECC_SERR" },
- { .fc_id = 175, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM5_ECC_SERR" },
- { .fc_id = 176, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM6_ECC_SERR" },
- { .fc_id = 177, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM7_ECC_SERR" },
- { .fc_id = 178, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM8_ECC_SERR" },
- { .fc_id = 179, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM9_ECC_SERR" },
- { .fc_id = 180, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM10_ECC_SERR" },
- { .fc_id = 181, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM11_ECC_SERR" },
- { .fc_id = 182, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM12_ECC_SERR" },
- { .fc_id = 183, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM13_ECC_SERR" },
- { .fc_id = 184, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM14_ECC_SERR" },
- { .fc_id = 185, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM15_ECC_SERR" },
- { .fc_id = 186, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM16_ECC_SERR" },
- { .fc_id = 187, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM17_ECC_SERR" },
- { .fc_id = 188, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM18_ECC_SERR" },
- { .fc_id = 189, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM19_ECC_SERR" },
- { .fc_id = 190, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM20_ECC_SERR" },
- { .fc_id = 191, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM21_ECC_SERR" },
- { .fc_id = 192, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM22_ECC_SERR" },
- { .fc_id = 193, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM23_ECC_SERR" },
- { .fc_id = 194, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM24_ECC_SERR" },
- { .fc_id = 195, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM25_ECC_SERR" },
- { .fc_id = 196, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM26_ECC_SERR" },
- { .fc_id = 197, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM27_ECC_SERR" },
- { .fc_id = 198, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM28_ECC_SERR" },
- { .fc_id = 199, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM29_ECC_SERR" },
- { .fc_id = 200, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM30_ECC_SERR" },
- { .fc_id = 201, .cpu_id = 58, .valid = 1,
- .msg = 0, .reset = 0, .name = "SRAM31_ECC_SERR" },
- { .fc_id = 202, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM0_ECC_DERR" },
- { .fc_id = 203, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM1_ECC_DERR" },
- { .fc_id = 204, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM2_ECC_DERR" },
- { .fc_id = 205, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM3_ECC_DERR" },
- { .fc_id = 206, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM4_ECC_DERR" },
- { .fc_id = 207, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM5_ECC_DERR" },
- { .fc_id = 208, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM6_ECC_DERR" },
- { .fc_id = 209, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM7_ECC_DERR" },
- { .fc_id = 210, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM8_ECC_DERR" },
- { .fc_id = 211, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM9_ECC_DERR" },
- { .fc_id = 212, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM10_ECC_DERR" },
- { .fc_id = 213, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM11_ECC_DERR" },
- { .fc_id = 214, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM12_ECC_DERR" },
- { .fc_id = 215, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM13_ECC_DERR" },
- { .fc_id = 216, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM14_ECC_DERR" },
- { .fc_id = 217, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM15_ECC_DERR" },
- { .fc_id = 218, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM16_ECC_DERR" },
- { .fc_id = 219, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM17_ECC_DERR" },
- { .fc_id = 220, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM18_ECC_DERR" },
- { .fc_id = 221, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM19_ECC_DERR" },
- { .fc_id = 222, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM20_ECC_DERR" },
- { .fc_id = 223, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM21_ECC_DERR" },
- { .fc_id = 224, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM22_ECC_DERR" },
- { .fc_id = 225, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM23_ECC_DERR" },
- { .fc_id = 226, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM24_ECC_DERR" },
- { .fc_id = 227, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM25_ECC_DERR" },
- { .fc_id = 228, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM26_ECC_DERR" },
- { .fc_id = 229, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM27_ECC_DERR" },
- { .fc_id = 230, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM28_ECC_DERR" },
- { .fc_id = 231, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM29_ECC_DERR" },
- { .fc_id = 232, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM30_ECC_DERR" },
- { .fc_id = 233, .cpu_id = 59, .valid = 1,
- .msg = 0, .reset = 1, .name = "SRAM31_ECC_DERR" },
- { .fc_id = 234, .cpu_id = 60, .valid = 1,
- .msg = 0, .reset = 1, .name = "GIC500" },
- { .fc_id = 235, .cpu_id = 61, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM_0_MC0_ECC_SERR" },
- { .fc_id = 236, .cpu_id = 61, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM_1_MC0_ECC_SERR" },
- { .fc_id = 237, .cpu_id = 61, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM_2_MC0_ECC_SERR" },
- { .fc_id = 238, .cpu_id = 61, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM_3_MC0_ECC_SERR" },
- { .fc_id = 239, .cpu_id = 61, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM_4_MC0_ECC_SERR" },
- { .fc_id = 240, .cpu_id = 61, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM_5_MC0_ECC_SERR" },
- { .fc_id = 241, .cpu_id = 61, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM_0_MC1_ECC_SERR" },
- { .fc_id = 242, .cpu_id = 61, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM_1_MC1_ECC_SERR" },
- { .fc_id = 243, .cpu_id = 61, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM_2_MC1_ECC_SERR" },
- { .fc_id = 244, .cpu_id = 61, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM_3_MC1_ECC_SERR" },
- { .fc_id = 245, .cpu_id = 61, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM_4_MC1_ECC_SERR" },
- { .fc_id = 246, .cpu_id = 61, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM_5_MC1_ECC_SERR" },
- { .fc_id = 247, .cpu_id = 62, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM_0_MC0_ECC_DERR" },
- { .fc_id = 248, .cpu_id = 62, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM_1_MC0_ECC_DERR" },
- { .fc_id = 249, .cpu_id = 62, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM_2_MC0_ECC_DERR" },
- { .fc_id = 250, .cpu_id = 62, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM_3_MC0_ECC_DERR" },
- { .fc_id = 251, .cpu_id = 62, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM_4_MC0_ECC_DERR" },
- { .fc_id = 252, .cpu_id = 62, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM_5_MC0_ECC_DERR" },
- { .fc_id = 253, .cpu_id = 62, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM_0_MC1_ECC_DERR" },
- { .fc_id = 254, .cpu_id = 62, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM_1_MC1_ECC_DERR" },
- { .fc_id = 255, .cpu_id = 62, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM_2_MC1_ECC_DERR" },
- { .fc_id = 256, .cpu_id = 62, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM_3_MC1_ECC_DERR" },
- { .fc_id = 257, .cpu_id = 62, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM_4_MC1_ECC_DERR" },
- { .fc_id = 258, .cpu_id = 62, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM_5_MC1_ECC_DERR" },
- { .fc_id = 259, .cpu_id = 63, .valid = 1,
- .msg = 0, .reset = 0, .name = "HMMU_0_ECC_SERR" },
- { .fc_id = 260, .cpu_id = 63, .valid = 1,
- .msg = 0, .reset = 0, .name = "HMMU_1_ECC_SERR" },
- { .fc_id = 261, .cpu_id = 63, .valid = 1,
- .msg = 0, .reset = 0, .name = "HMMU_2_ECC_SERR" },
- { .fc_id = 262, .cpu_id = 63, .valid = 1,
- .msg = 0, .reset = 0, .name = "HMMU_3_ECC_SERR" },
- { .fc_id = 263, .cpu_id = 63, .valid = 1,
- .msg = 0, .reset = 0, .name = "HMMU_8_ECC_SERR" },
- { .fc_id = 264, .cpu_id = 63, .valid = 1,
- .msg = 0, .reset = 0, .name = "HMMU_9_ECC_SERR" },
- { .fc_id = 265, .cpu_id = 63, .valid = 1,
- .msg = 0, .reset = 0, .name = "HMMU_10_ECC_SERR" },
- { .fc_id = 266, .cpu_id = 63, .valid = 1,
- .msg = 0, .reset = 0, .name = "HMMU_11_ECC_SERR" },
- { .fc_id = 267, .cpu_id = 63, .valid = 1,
- .msg = 0, .reset = 0, .name = "HMMU_7_ECC_SERR" },
- { .fc_id = 268, .cpu_id = 63, .valid = 1,
- .msg = 0, .reset = 0, .name = "HMMU_6_ECC_SERR" },
- { .fc_id = 269, .cpu_id = 63, .valid = 1,
- .msg = 0, .reset = 0, .name = "HMMU_5_ECC_SERR" },
- { .fc_id = 270, .cpu_id = 63, .valid = 1,
- .msg = 0, .reset = 0, .name = "HMMU_4_ECC_SERR" },
- { .fc_id = 271, .cpu_id = 63, .valid = 1,
- .msg = 0, .reset = 0, .name = "HMMU_15_ECC_SERR" },
- { .fc_id = 272, .cpu_id = 63, .valid = 1,
- .msg = 0, .reset = 0, .name = "HMMU_14_ECC_SERR" },
- { .fc_id = 273, .cpu_id = 63, .valid = 1,
- .msg = 0, .reset = 0, .name = "HMMU_13_ECC_SERR" },
- { .fc_id = 274, .cpu_id = 63, .valid = 1,
- .msg = 0, .reset = 0, .name = "HMMU_12_ECC_SERR" },
- { .fc_id = 275, .cpu_id = 64, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_0_ECC_DERR" },
- { .fc_id = 276, .cpu_id = 64, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_1_ECC_DERR" },
- { .fc_id = 277, .cpu_id = 64, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_2_ECC_DERR" },
- { .fc_id = 278, .cpu_id = 64, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_3_ECC_DERR" },
- { .fc_id = 279, .cpu_id = 64, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_8_ECC_DERR" },
- { .fc_id = 280, .cpu_id = 64, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_9_ECC_DERR" },
- { .fc_id = 281, .cpu_id = 64, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_10_ECC_DERR" },
- { .fc_id = 282, .cpu_id = 64, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_11_ECC_DERR" },
- { .fc_id = 283, .cpu_id = 64, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_7_ECC_DERR" },
- { .fc_id = 284, .cpu_id = 64, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_6_ECC_DERR" },
- { .fc_id = 285, .cpu_id = 64, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_5_ECC_DERR" },
- { .fc_id = 286, .cpu_id = 64, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_4_ECC_DERR" },
- { .fc_id = 287, .cpu_id = 64, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_15_ECC_DERR" },
- { .fc_id = 288, .cpu_id = 64, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_14_ECC_DERR" },
- { .fc_id = 289, .cpu_id = 64, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_13_ECC_DERR" },
- { .fc_id = 290, .cpu_id = 64, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_12_ECC_DERR" },
- { .fc_id = 291, .cpu_id = 65, .valid = 1,
- .msg = 0, .reset = 0, .name = "PMMU_ECC_SERR" },
- { .fc_id = 292, .cpu_id = 66, .valid = 1,
- .msg = 0, .reset = 1, .name = "PMMU_ECC_DERR" },
- { .fc_id = 293, .cpu_id = 67, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 294, .cpu_id = 68, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 295, .cpu_id = 69, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC0_VCD_ECC_SERR" },
- { .fc_id = 296, .cpu_id = 69, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC1_VCD_ECC_SERR" },
- { .fc_id = 297, .cpu_id = 69, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC2_VCD_ECC_SERR" },
- { .fc_id = 298, .cpu_id = 69, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC3_VCD_ECC_SERR" },
- { .fc_id = 299, .cpu_id = 69, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC4_VCD_ECC_SERR" },
- { .fc_id = 300, .cpu_id = 69, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC5_VCD_ECC_SERR" },
- { .fc_id = 301, .cpu_id = 69, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC6_VCD_ECC_SERR" },
- { .fc_id = 302, .cpu_id = 69, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC7_VCD_ECC_SERR" },
- { .fc_id = 303, .cpu_id = 69, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC8_VCD_ECC_SERR" },
- { .fc_id = 304, .cpu_id = 69, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC9_VCD_ECC_SERR" },
- { .fc_id = 305, .cpu_id = 69, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC0_L2C_ECC_SERR" },
- { .fc_id = 306, .cpu_id = 69, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC1_L2C_ECC_SERR" },
- { .fc_id = 307, .cpu_id = 69, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC2_L2C_ECC_SERR" },
- { .fc_id = 308, .cpu_id = 69, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC3_L2C_ECC_SERR" },
- { .fc_id = 309, .cpu_id = 69, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC4_L2C_ECC_SERR" },
- { .fc_id = 310, .cpu_id = 69, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC5_L2C_ECC_SERR" },
- { .fc_id = 311, .cpu_id = 69, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC6_L2C_ECC_SERR" },
- { .fc_id = 312, .cpu_id = 69, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC7_L2C_ECC_SERR" },
- { .fc_id = 313, .cpu_id = 69, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC8_L2C_ECC_SERR" },
- { .fc_id = 314, .cpu_id = 69, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC9_L2C_ECC_SERR" },
- { .fc_id = 315, .cpu_id = 70, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC0_VCD_ECC_DERR" },
- { .fc_id = 316, .cpu_id = 70, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC1_VCD_ECC_DERR" },
- { .fc_id = 317, .cpu_id = 70, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC2_VCD_ECC_DERR" },
- { .fc_id = 318, .cpu_id = 70, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC3_VCD_ECC_DERR" },
- { .fc_id = 319, .cpu_id = 70, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC4_VCD_ECC_DERR" },
- { .fc_id = 320, .cpu_id = 70, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC5_VCD_ECC_DERR" },
- { .fc_id = 321, .cpu_id = 70, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC6_VCD_ECC_DERR" },
- { .fc_id = 322, .cpu_id = 70, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC7_VCD_ECC_DERR" },
- { .fc_id = 323, .cpu_id = 70, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC8_VCD_ECC_DERR" },
- { .fc_id = 324, .cpu_id = 70, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC9_VCD_ECC_DERR" },
- { .fc_id = 325, .cpu_id = 70, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC0_L2C_ECC_DERR" },
- { .fc_id = 326, .cpu_id = 70, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC1_L2C_ECC_DERR" },
- { .fc_id = 327, .cpu_id = 70, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC2_L2C_ECC_DERR" },
- { .fc_id = 328, .cpu_id = 70, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC3_L2C_ECC_DERR" },
- { .fc_id = 329, .cpu_id = 70, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC4_L2C_ECC_DERR" },
- { .fc_id = 330, .cpu_id = 70, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC5_L2C_ECC_DERR" },
- { .fc_id = 331, .cpu_id = 70, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC6_L2C_ECC_DERR" },
- { .fc_id = 332, .cpu_id = 70, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC7_L2C_ECC_DERR" },
- { .fc_id = 333, .cpu_id = 70, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC8_L2C_ECC_DERR" },
- { .fc_id = 334, .cpu_id = 70, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC9_L2C_ECC_DERR" },
- { .fc_id = 335, .cpu_id = 71, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 336, .cpu_id = 72, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 337, .cpu_id = 73, .valid = 1,
- .msg = 0, .reset = 0, .name = "HIF0_ECC_SERR" },
- { .fc_id = 338, .cpu_id = 73, .valid = 1,
- .msg = 0, .reset = 0, .name = "HIF1_ECC_SERR" },
- { .fc_id = 339, .cpu_id = 73, .valid = 1,
- .msg = 0, .reset = 0, .name = "HIF2_ECC_SERR" },
- { .fc_id = 340, .cpu_id = 73, .valid = 1,
- .msg = 0, .reset = 0, .name = "HIF3_ECC_SERR" },
- { .fc_id = 341, .cpu_id = 73, .valid = 1,
- .msg = 0, .reset = 0, .name = "HIF8_ECC_SERR" },
- { .fc_id = 342, .cpu_id = 73, .valid = 1,
- .msg = 0, .reset = 0, .name = "HIF9_ECC_SERR" },
- { .fc_id = 343, .cpu_id = 73, .valid = 1,
- .msg = 0, .reset = 0, .name = "HIF10_ECC_SERR" },
- { .fc_id = 344, .cpu_id = 73, .valid = 1,
- .msg = 0, .reset = 0, .name = "HIF11_ECC_SERR" },
- { .fc_id = 345, .cpu_id = 73, .valid = 1,
- .msg = 0, .reset = 0, .name = "HIF7_ECC_SERR" },
- { .fc_id = 346, .cpu_id = 73, .valid = 1,
- .msg = 0, .reset = 0, .name = "HIF6_ECC_SERR" },
- { .fc_id = 347, .cpu_id = 73, .valid = 1,
- .msg = 0, .reset = 0, .name = "HIF5_ECC_SERR" },
- { .fc_id = 348, .cpu_id = 73, .valid = 1,
- .msg = 0, .reset = 0, .name = "HIF4_ECC_SERR" },
- { .fc_id = 349, .cpu_id = 73, .valid = 1,
- .msg = 0, .reset = 0, .name = "HIF15_ECC_SERR" },
- { .fc_id = 350, .cpu_id = 73, .valid = 1,
- .msg = 0, .reset = 0, .name = "HIF14_ECC_SERR" },
- { .fc_id = 351, .cpu_id = 73, .valid = 1,
- .msg = 0, .reset = 0, .name = "HIF13_ECC_SERR" },
- { .fc_id = 352, .cpu_id = 73, .valid = 1,
- .msg = 0, .reset = 0, .name = "HIF12_ECC_SERR" },
- { .fc_id = 353, .cpu_id = 74, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF0_ECC_DERR" },
- { .fc_id = 354, .cpu_id = 74, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF1_ECC_DERR" },
- { .fc_id = 355, .cpu_id = 74, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF2_ECC_DERR" },
- { .fc_id = 356, .cpu_id = 74, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF3_ECC_DERR" },
- { .fc_id = 357, .cpu_id = 74, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF8_ECC_DERR" },
- { .fc_id = 358, .cpu_id = 74, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF9_ECC_DERR" },
- { .fc_id = 359, .cpu_id = 74, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF10_ECC_DERR" },
- { .fc_id = 360, .cpu_id = 74, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF11_ECC_DERR" },
- { .fc_id = 361, .cpu_id = 74, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF7_ECC_DERR" },
- { .fc_id = 362, .cpu_id = 74, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF6_ECC_DERR" },
- { .fc_id = 363, .cpu_id = 74, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF5_ECC_DERR" },
- { .fc_id = 364, .cpu_id = 74, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF4_ECC_DERR" },
- { .fc_id = 365, .cpu_id = 74, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF15_ECC_DERR" },
- { .fc_id = 366, .cpu_id = 74, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF14_ECC_DERR" },
- { .fc_id = 367, .cpu_id = 74, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF13_ECC_DERR" },
- { .fc_id = 368, .cpu_id = 74, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF12_ECC_DERR" },
- { .fc_id = 369, .cpu_id = 75, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC0_ECC_SERR" },
- { .fc_id = 370, .cpu_id = 75, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC1_ECC_SERR" },
- { .fc_id = 371, .cpu_id = 75, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC2_ECC_SERR" },
- { .fc_id = 372, .cpu_id = 75, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC3_ECC_SERR" },
- { .fc_id = 373, .cpu_id = 75, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC4_ECC_SERR" },
- { .fc_id = 374, .cpu_id = 75, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC5_ECC_SERR" },
- { .fc_id = 375, .cpu_id = 75, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC6_ECC_SERR" },
- { .fc_id = 376, .cpu_id = 75, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC7_ECC_SERR" },
- { .fc_id = 377, .cpu_id = 75, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC8_ECC_SERR" },
- { .fc_id = 378, .cpu_id = 75, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC9_ECC_SERR" },
- { .fc_id = 379, .cpu_id = 75, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC10_ECC_SERR" },
- { .fc_id = 380, .cpu_id = 75, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC11_ECC_SERR" },
- { .fc_id = 381, .cpu_id = 76, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC0_ECC_DERR" },
- { .fc_id = 382, .cpu_id = 76, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC1_ECC_DERR" },
- { .fc_id = 383, .cpu_id = 76, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC2_ECC_DERR" },
- { .fc_id = 384, .cpu_id = 76, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC3_ECC_DERR" },
- { .fc_id = 385, .cpu_id = 76, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC4_ECC_DERR" },
- { .fc_id = 386, .cpu_id = 76, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC5_ECC_DERR" },
- { .fc_id = 387, .cpu_id = 76, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC6_ECC_DERR" },
- { .fc_id = 388, .cpu_id = 76, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC7_ECC_DERR" },
- { .fc_id = 389, .cpu_id = 76, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC8_ECC_DERR" },
- { .fc_id = 390, .cpu_id = 76, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC9_ECC_DERR" },
- { .fc_id = 391, .cpu_id = 76, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC10_ECC_DERR" },
- { .fc_id = 392, .cpu_id = 76, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC11_ECC_DERR" },
- { .fc_id = 393, .cpu_id = 77, .valid = 1,
- .msg = 0, .reset = 1, .name = "SM0_ECC_DERR" },
- { .fc_id = 394, .cpu_id = 77, .valid = 1,
- .msg = 0, .reset = 1, .name = "SM1_ECC_DERR" },
- { .fc_id = 395, .cpu_id = 77, .valid = 1,
- .msg = 0, .reset = 1, .name = "SM2_ECC_DERR" },
- { .fc_id = 396, .cpu_id = 77, .valid = 1,
- .msg = 0, .reset = 1, .name = "SM3_ECC_DERR" },
- { .fc_id = 397, .cpu_id = 78, .valid = 1,
- .msg = 0, .reset = 0, .name = "SM0_ECC_SERR" },
- { .fc_id = 398, .cpu_id = 78, .valid = 1,
- .msg = 0, .reset = 0, .name = "SM1_ECC_SERR" },
- { .fc_id = 399, .cpu_id = 78, .valid = 1,
- .msg = 0, .reset = 0, .name = "SM2_ECC_SERR" },
- { .fc_id = 400, .cpu_id = 78, .valid = 1,
- .msg = 0, .reset = 0, .name = "SM3_ECC_SERR" },
- { .fc_id = 401, .cpu_id = 79, .valid = 1,
- .msg = 0, .reset = 0, .name = "XBAR0_ECC_SERR" },
- { .fc_id = 402, .cpu_id = 79, .valid = 1,
- .msg = 0, .reset = 0, .name = "XBAR1_ECC_SERR" },
- { .fc_id = 403, .cpu_id = 79, .valid = 1,
- .msg = 0, .reset = 0, .name = "XBAR2_ECC_SERR" },
- { .fc_id = 404, .cpu_id = 79, .valid = 1,
- .msg = 0, .reset = 0, .name = "XBAR3_ECC_SERR" },
- { .fc_id = 405, .cpu_id = 80, .valid = 1,
- .msg = 0, .reset = 1, .name = "XBAR0_ECC_DERR" },
- { .fc_id = 406, .cpu_id = 80, .valid = 1,
- .msg = 0, .reset = 1, .name = "XBAR1_ECC_DERR" },
- { .fc_id = 407, .cpu_id = 80, .valid = 1,
- .msg = 0, .reset = 1, .name = "XBAR2_ECC_DERR" },
- { .fc_id = 408, .cpu_id = 80, .valid = 1,
- .msg = 0, .reset = 1, .name = "XBAR3_ECC_DERR" },
- { .fc_id = 409, .cpu_id = 81, .valid = 1,
- .msg = 0, .reset = 0, .name = "ARC0_ECC_SERR" },
- { .fc_id = 410, .cpu_id = 82, .valid = 1,
- .msg = 0, .reset = 1, .name = "ARC0_ECC_DERR" },
- { .fc_id = 411, .cpu_id = 83, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 412, .cpu_id = 84, .valid = 1,
- .msg = 0, .reset = 1, .name = "PCIE_ADDR_DEC_ERR" },
- { .fc_id = 413, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC0_AXI_ERR_RSP" },
- { .fc_id = 414, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC1_AXI_ERR_RSP" },
- { .fc_id = 415, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC2_AXI_ERR_RSP" },
- { .fc_id = 416, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC3_AXI_ERR_RSP" },
- { .fc_id = 417, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC4_AXI_ERR_RSP" },
- { .fc_id = 418, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC5_AXI_ERR_RSP" },
- { .fc_id = 419, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC6_AXI_ERR_RSP" },
- { .fc_id = 420, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC7_AXI_ERR_RSP" },
- { .fc_id = 421, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC8_AXI_ERR_RSP" },
- { .fc_id = 422, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC9_AXI_ERR_RSP" },
- { .fc_id = 423, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC10_AXI_ERR_RSP" },
- { .fc_id = 424, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC11_AXI_ERR_RSP" },
- { .fc_id = 425, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC12_AXI_ERR_RSP" },
- { .fc_id = 426, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC13_AXI_ERR_RSP" },
- { .fc_id = 427, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC14_AXI_ERR_RSP" },
- { .fc_id = 428, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC15_AXI_ERR_RSP" },
- { .fc_id = 429, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC16_AXI_ERR_RSP" },
- { .fc_id = 430, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC17_AXI_ERR_RSP" },
- { .fc_id = 431, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC18_AXI_ERR_RSP" },
- { .fc_id = 432, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC19_AXI_ERR_RSP" },
- { .fc_id = 433, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC20_AXI_ERR_RSP" },
- { .fc_id = 434, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC21_AXI_ERR_RSP" },
- { .fc_id = 435, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC22_AXI_ERR_RSP" },
- { .fc_id = 436, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC23_AXI_ERR_RSP" },
- { .fc_id = 437, .cpu_id = 85, .valid = 1,
- .msg = 0, .reset = 1, .name = "TPC24_AXI_ERR_RSP" },
- { .fc_id = 438, .cpu_id = 86, .valid = 1,
- .msg = 0, .reset = 1, .name = "AXI_ECC" },
- { .fc_id = 439, .cpu_id = 87, .valid = 1,
- .msg = 0, .reset = 1, .name = "L2_RAM_ECC" },
- { .fc_id = 440, .cpu_id = 88, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME0_SBTE0_AXI_ERR_RSP" },
- { .fc_id = 441, .cpu_id = 88, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME0_SBTE1_AXI_ERR_RSP" },
- { .fc_id = 442, .cpu_id = 88, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME0_SBTE2_AXI_ERR_RSP" },
- { .fc_id = 443, .cpu_id = 88, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME0_SBTE3_AXI_ERR_RSP" },
- { .fc_id = 444, .cpu_id = 88, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME0_SBTE4_AXI_ERR_RSP" },
- { .fc_id = 445, .cpu_id = 88, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME0_CTRL_AXI_ERROR_RESPONSE" },
- { .fc_id = 446, .cpu_id = 88, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME0_QMAN_SW_ERROR" },
- { .fc_id = 447, .cpu_id = 89, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME1_SBTE0_AXI_ERR_RSP" },
- { .fc_id = 448, .cpu_id = 89, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME1_SBTE1_AXI_ERR_RSP" },
- { .fc_id = 449, .cpu_id = 89, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME1_SBTE2_AXI_ERR_RSP" },
- { .fc_id = 450, .cpu_id = 89, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME1_SBTE3_AXI_ERR_RSP" },
- { .fc_id = 451, .cpu_id = 89, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME1_SBTE4_AXI_ERR_RSP" },
- { .fc_id = 452, .cpu_id = 89, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME1_CTRL_AXI_ERROR_RESPONSE" },
- { .fc_id = 453, .cpu_id = 89, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME1_QMAN_SW_ERROR" },
- { .fc_id = 454, .cpu_id = 90, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME2_SBTE0_AXI_ERR_RSP" },
- { .fc_id = 455, .cpu_id = 90, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME2_SBTE1_AXI_ERR_RSP" },
- { .fc_id = 456, .cpu_id = 90, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME2_SBTE2_AXI_ERR_RSP" },
- { .fc_id = 457, .cpu_id = 90, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME2_SBTE3_AXI_ERR_RSP" },
- { .fc_id = 458, .cpu_id = 90, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME2_SBTE4_AXI_ERR_RSP" },
- { .fc_id = 459, .cpu_id = 90, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME2_CTRL_AXI_ERROR_RESPONSE" },
- { .fc_id = 460, .cpu_id = 90, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME2_QMAN_SW_ERROR" },
- { .fc_id = 461, .cpu_id = 91, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME3_SBTE0_AXI_ERR_RSP" },
- { .fc_id = 462, .cpu_id = 91, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME3_SBTE1_AXI_ERR_RSP" },
- { .fc_id = 463, .cpu_id = 91, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME3_SBTE2_AXI_ERR_RSP" },
- { .fc_id = 464, .cpu_id = 91, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME3_SBTE3_AXI_ERR_RSP" },
- { .fc_id = 465, .cpu_id = 91, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME3_SBTE4_AXI_ERR_RSP" },
- { .fc_id = 466, .cpu_id = 91, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME3_CTRL_AXI_ERROR_RESPONSE" },
- { .fc_id = 467, .cpu_id = 91, .valid = 1,
- .msg = 0, .reset = 1, .name = "MME3_QMAN_SW_ERROR" },
- { .fc_id = 468, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "PSOC_MME_PLL_LOCK_ERR" },
- { .fc_id = 469, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "PSOC_CPU_PLL_LOCK_ERR" },
- { .fc_id = 470, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE3_TPC_PLL_LOCK_ERR" },
- { .fc_id = 471, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE3_NIC_PLL_LOCK_ERR" },
- { .fc_id = 472, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE3_XBAR_MMU_PLL_LOCK_ERR" },
- { .fc_id = 473, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE3_XBAR_DMA_PLL_LOCK_ERR" },
- { .fc_id = 474, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE3_XBAR_IF_PLL_LOCK_ERR" },
- { .fc_id = 475, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE3_XBAR_BANK_PLL_LOCK_ERR" },
- { .fc_id = 476, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE1_XBAR_MMU_PLL_LOCK_ERR" },
- { .fc_id = 477, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE1_XBAR_DMA_PLL_LOCK_ERR" },
- { .fc_id = 478, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE1_XBAR_IF_PLL_LOCK_ERR" },
- { .fc_id = 479, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE1_XBAR_MESH_PLL_LOCK_ERR" },
- { .fc_id = 480, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE1_TPC_PLL_LOCK_ERR" },
- { .fc_id = 481, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE1_NIC_PLL_LOCK_ERR" },
- { .fc_id = 482, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "PMMU_MME_PLL_LOCK_ERR" },
- { .fc_id = 483, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE0_TPC_PLL_LOCK_ERR" },
- { .fc_id = 484, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE0_PCI_PLL_LOCK_ERR" },
- { .fc_id = 485, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE0_XBAR_MMU_PLL_LOCK_ERR" },
- { .fc_id = 486, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE0_XBAR_DMA_PLL_LOCK_ERR" },
- { .fc_id = 487, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE0_XBAR_IF_PLL_LOCK_ERR" },
- { .fc_id = 488, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE0_XBAR_MESH_PLL_LOCK_ERR" },
- { .fc_id = 489, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE2_XBAR_MMU_PLL_LOCK_ERR" },
- { .fc_id = 490, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE2_XBAR_DMA_PLL_LOCK_ERR" },
- { .fc_id = 491, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE2_XBAR_IF_PLL_LOCK_ERR" },
- { .fc_id = 492, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE2_XBAR_BANK_PLL_LOCK_ERR" },
- { .fc_id = 493, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE2_TPC_PLL_LOCK_ERR" },
- { .fc_id = 494, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "PSOC_VID_PLL_LOCK_ERR" },
- { .fc_id = 495, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "PMMU_VID_PLL_LOCK_ERR" },
- { .fc_id = 496, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE3_HBM_PLL_LOCK_ERR" },
- { .fc_id = 497, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE1_XBAR_HBM_PLL_LOCK_ERR" },
- { .fc_id = 498, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE1_HBM_PLL_LOCK_ERR" },
- { .fc_id = 499, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE0_HBM_PLL_LOCK_ERR" },
- { .fc_id = 500, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE2_XBAR_HBM_PLL_LOCK_ERR" },
- { .fc_id = 501, .cpu_id = 92, .valid = 1,
- .msg = 0, .reset = 1, .name = "DCORE2_HBM_PLL_LOCK_ERR" },
- { .fc_id = 502, .cpu_id = 93, .valid = 1,
- .msg = 0, .reset = 1, .name = "CPU_AXI_ERR_RSP" },
- { .fc_id = 503, .cpu_id = 94, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_0_AXI_ERR_RSP" },
- { .fc_id = 504, .cpu_id = 94, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_1_AXI_ERR_RSP" },
- { .fc_id = 505, .cpu_id = 94, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_2_AXI_ERR_RSP" },
- { .fc_id = 506, .cpu_id = 94, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_3_AXI_ERR_RSP" },
- { .fc_id = 507, .cpu_id = 94, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_8_AXI_ERR_RSP" },
- { .fc_id = 508, .cpu_id = 94, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_9_AXI_ERR_RSP" },
- { .fc_id = 509, .cpu_id = 94, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_10_AXI_ERR_RSP" },
- { .fc_id = 510, .cpu_id = 94, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_11_AXI_ERR_RSP" },
- { .fc_id = 511, .cpu_id = 94, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_7_AXI_ERR_RSP" },
- { .fc_id = 512, .cpu_id = 94, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_6_AXI_ERR_RSP" },
- { .fc_id = 513, .cpu_id = 94, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_5_AXI_ERR_RSP" },
- { .fc_id = 514, .cpu_id = 94, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_4_AXI_ERR_RSP" },
- { .fc_id = 515, .cpu_id = 94, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_15_AXI_ERR_RSP" },
- { .fc_id = 516, .cpu_id = 94, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_14_AXI_ERR_RSP" },
- { .fc_id = 517, .cpu_id = 94, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_13_AXI_ERR_RSP" },
- { .fc_id = 518, .cpu_id = 94, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU_12_AXI_ERR_RSP" },
- { .fc_id = 519, .cpu_id = 95, .valid = 1,
- .msg = 0, .reset = 1, .name = "PMMU_FATAL" },
- { .fc_id = 520, .cpu_id = 96, .valid = 1,
- .msg = 0, .reset = 1, .name = "PMMU_AXI_ERR_RSP" },
- { .fc_id = 521, .cpu_id = 97, .valid = 1,
- .msg = 0, .reset = 0, .name = "VM0_ALARM_A" },
- { .fc_id = 522, .cpu_id = 98, .valid = 1,
- .msg = 0, .reset = 0, .name = "VM0_ALARM_B" },
- { .fc_id = 523, .cpu_id = 99, .valid = 1,
- .msg = 0, .reset = 0, .name = "VM1_ALARM_A" },
- { .fc_id = 524, .cpu_id = 100, .valid = 1,
- .msg = 0, .reset = 0, .name = "VM1_ALARM_B" },
- { .fc_id = 525, .cpu_id = 101, .valid = 1,
- .msg = 0, .reset = 0, .name = "VM2_ALARM_A" },
- { .fc_id = 526, .cpu_id = 102, .valid = 1,
- .msg = 0, .reset = 0, .name = "VM2_ALARM_B" },
- { .fc_id = 527, .cpu_id = 103, .valid = 1,
- .msg = 0, .reset = 0, .name = "VM3_ALARM_A" },
- { .fc_id = 528, .cpu_id = 104, .valid = 1,
- .msg = 0, .reset = 0, .name = "VM3_ALARM_B" },
- { .fc_id = 529, .cpu_id = 105, .valid = 1,
- .msg = 0, .reset = 1, .name = "PSOC_AXI_ERR_RSP" },
- { .fc_id = 530, .cpu_id = 106, .valid = 1,
- .msg = 0, .reset = 0, .name = "PSOC_PRSTN_FALL" },
- { .fc_id = 531, .cpu_id = 107, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 532, .cpu_id = 107, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 533, .cpu_id = 107, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 534, .cpu_id = 107, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 535, .cpu_id = 107, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 536, .cpu_id = 107, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 537, .cpu_id = 107, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 538, .cpu_id = 107, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 539, .cpu_id = 108, .valid = 1,
- .msg = 0, .reset = 1, .name = "KDMA_CH0_AXI_ERR_RSP" },
- { .fc_id = 540, .cpu_id = 109, .valid = 1,
- .msg = 0, .reset = 1, .name = "PDMA_CH0_AXI_ERR_RSP" },
- { .fc_id = 541, .cpu_id = 109, .valid = 1,
- .msg = 0, .reset = 1, .name = "PDMA_CH1_AXI_ERR_RSP" },
- { .fc_id = 542, .cpu_id = 110, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM_CATTRIP_0" },
- { .fc_id = 543, .cpu_id = 111, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM_CATTRIP_1" },
- { .fc_id = 544, .cpu_id = 112, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM_CATTRIP_2" },
- { .fc_id = 545, .cpu_id = 113, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM_CATTRIP_3" },
- { .fc_id = 546, .cpu_id = 114, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM_CATTRIP_4" },
- { .fc_id = 547, .cpu_id = 115, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM_CATTRIP_5" },
- { .fc_id = 548, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM0_MC0_SEI_SEVERE" },
- { .fc_id = 549, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM0_MC0_SEI_NON_SEVERE" },
- { .fc_id = 550, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM0_MC1_SEI_SEVERE" },
- { .fc_id = 551, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM0_MC1_SEI_NON_SEVERE" },
- { .fc_id = 552, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM1_MC0_SEI_SEVERE" },
- { .fc_id = 553, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM1_MC0_SEI_NON_SEVERE" },
- { .fc_id = 554, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM1_MC1_SEI_SEVERE" },
- { .fc_id = 555, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM1_MC1_SEI_NON_SEVERE" },
- { .fc_id = 556, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM2_MC0_SEI_SEVERE" },
- { .fc_id = 557, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM2_MC0_SEI_NON_SEVERE" },
- { .fc_id = 558, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM2_MC1_SEI_SEVERE" },
- { .fc_id = 559, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM2_MC1_SEI_NON_SEVERE" },
- { .fc_id = 560, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM3_MC0_SEI_SEVERE" },
- { .fc_id = 561, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM3_MC0_SEI_NON_SEVERE" },
- { .fc_id = 562, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM3_MC1_SEI_SEVERE" },
- { .fc_id = 563, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM3_MC1_SEI_NON_SEVERE" },
- { .fc_id = 564, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM4_MC0_SEI_SEVERE" },
- { .fc_id = 565, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM4_MC0_SEI_NON_SEVERE" },
- { .fc_id = 566, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM4_MC1_SEI_SEVERE" },
- { .fc_id = 567, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM4_MC1_SEI_NON_SEVERE" },
- { .fc_id = 568, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM5_MC0_SEI_SEVERE" },
- { .fc_id = 569, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM5_MC0_SEI_NON_SEVERE" },
- { .fc_id = 570, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 1, .name = "HBM5_MC1_SEI_SEVERE" },
- { .fc_id = 571, .cpu_id = 116, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM5_MC1_SEI_NON_SEVERE" },
- { .fc_id = 572, .cpu_id = 117, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC0_AXI_ERR_RSPONSE" },
- { .fc_id = 573, .cpu_id = 117, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC1_AXI_ERR_RSPONSE" },
- { .fc_id = 574, .cpu_id = 117, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC2_AXI_ERR_RSPONSE" },
- { .fc_id = 575, .cpu_id = 117, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC3_AXI_ERR_RSPONSE" },
- { .fc_id = 576, .cpu_id = 117, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC4_AXI_ERR_RSPONSE" },
- { .fc_id = 577, .cpu_id = 117, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC5_AXI_ERR_RSPONSE" },
- { .fc_id = 578, .cpu_id = 117, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC6_AXI_ERR_RSPONSE" },
- { .fc_id = 579, .cpu_id = 117, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC7_AXI_ERR_RSPONSE" },
- { .fc_id = 580, .cpu_id = 117, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC8_AXI_ERR_RSPONSE" },
- { .fc_id = 581, .cpu_id = 117, .valid = 1,
- .msg = 0, .reset = 1, .name = "DEC9_AXI_ERR_RSPONSE" },
- { .fc_id = 582, .cpu_id = 118, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 583, .cpu_id = 119, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 584, .cpu_id = 120, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF0_FATAL" },
- { .fc_id = 585, .cpu_id = 120, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF1_FATAL" },
- { .fc_id = 586, .cpu_id = 120, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF2_FATAL" },
- { .fc_id = 587, .cpu_id = 120, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF3_FATAL" },
- { .fc_id = 588, .cpu_id = 120, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF8_FATAL" },
- { .fc_id = 589, .cpu_id = 120, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF9_FATAL" },
- { .fc_id = 590, .cpu_id = 120, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF10_FATAL" },
- { .fc_id = 591, .cpu_id = 120, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF11_FATAL" },
- { .fc_id = 592, .cpu_id = 120, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF7_FATAL" },
- { .fc_id = 593, .cpu_id = 120, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF6_FATAL" },
- { .fc_id = 594, .cpu_id = 120, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF5_FATAL" },
- { .fc_id = 595, .cpu_id = 120, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF4_FATAL" },
- { .fc_id = 596, .cpu_id = 120, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF15_FATAL" },
- { .fc_id = 597, .cpu_id = 120, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF14_FATAL" },
- { .fc_id = 598, .cpu_id = 120, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF13_FATAL" },
- { .fc_id = 599, .cpu_id = 120, .valid = 1,
- .msg = 0, .reset = 1, .name = "HIF12_FATAL" },
- { .fc_id = 600, .cpu_id = 121, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC0_AXI_ERROR_RESPONSE" },
- { .fc_id = 601, .cpu_id = 121, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC1_AXI_ERROR_RESPONSE" },
- { .fc_id = 602, .cpu_id = 121, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC2_AXI_ERROR_RESPONSE" },
- { .fc_id = 603, .cpu_id = 121, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC3_AXI_ERROR_RESPONSE" },
- { .fc_id = 604, .cpu_id = 121, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC4_AXI_ERROR_RESPONSE" },
- { .fc_id = 605, .cpu_id = 121, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC5_AXI_ERROR_RESPONSE" },
- { .fc_id = 606, .cpu_id = 121, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC6_AXI_ERROR_RESPONSE" },
- { .fc_id = 607, .cpu_id = 121, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC7_AXI_ERROR_RESPONSE" },
- { .fc_id = 608, .cpu_id = 121, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC8_AXI_ERROR_RESPONSE" },
- { .fc_id = 609, .cpu_id = 121, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC9_AXI_ERROR_RESPONSE" },
- { .fc_id = 610, .cpu_id = 121, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC10_AXI_ERROR_RESPONSE" },
- { .fc_id = 611, .cpu_id = 121, .valid = 1,
- .msg = 0, .reset = 1, .name = "NIC11_AXI_ERROR_RESPONSE" },
- { .fc_id = 612, .cpu_id = 122, .valid = 1,
- .msg = 0, .reset = 1, .name = "SM0_AXI_ERROR_RESPONSE" },
- { .fc_id = 613, .cpu_id = 122, .valid = 1,
- .msg = 0, .reset = 1, .name = "SM1_AXI_ERROR_RESPONSE" },
- { .fc_id = 614, .cpu_id = 122, .valid = 1,
- .msg = 0, .reset = 1, .name = "SM2_AXI_ERROR_RESPONSE" },
- { .fc_id = 615, .cpu_id = 122, .valid = 1,
- .msg = 0, .reset = 1, .name = "SM3_AXI_ERROR_RESPONSE" },
- { .fc_id = 616, .cpu_id = 123, .valid = 1,
- .msg = 0, .reset = 1, .name = "ARC_AXI_ERROR_RESPONSE" },
- { .fc_id = 617, .cpu_id = 124, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 618, .cpu_id = 125, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 619, .cpu_id = 125, .valid = 1,
- .msg = 0, .reset = 0, .name = "PCIE_FLR_REQUESTED" },
- { .fc_id = 620, .cpu_id = 125, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 621, .cpu_id = 125, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 622, .cpu_id = 125, .valid = 1,
- .msg = 0, .reset = 1, .name = "PCIE_APB_TIMEOUT" },
- { .fc_id = 623, .cpu_id = 125, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 624, .cpu_id = 125, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 625, .cpu_id = 125, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 626, .cpu_id = 125, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 627, .cpu_id = 125, .valid = 1,
- .msg = 0, .reset = 0, .name = "PCIE_FATAL_ERR" },
- { .fc_id = 628, .cpu_id = 125, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 629, .cpu_id = 126, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 630, .cpu_id = 127, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 631, .cpu_id = 128, .valid = 1,
- .msg = 0, .reset = 0, .name = "PCIE_P2P_MSIX" },
- { .fc_id = 632, .cpu_id = 129, .valid = 1,
- .msg = 0, .reset = 0, .name = "PCIE_DRAIN_COMPLETE" },
- { .fc_id = 633, .cpu_id = 130, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC0_BMON_SPMU" },
- { .fc_id = 634, .cpu_id = 131, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC0_KERNEL_ERR" },
- { .fc_id = 635, .cpu_id = 132, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC1_BMON_SPMU" },
- { .fc_id = 636, .cpu_id = 133, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC1_KERNEL_ERR" },
- { .fc_id = 637, .cpu_id = 134, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC2_BMON_SPMU" },
- { .fc_id = 638, .cpu_id = 135, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC2_KERNEL_ERR" },
- { .fc_id = 639, .cpu_id = 136, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC3_BMON_SPMU" },
- { .fc_id = 640, .cpu_id = 137, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC3_KERNEL_ERR" },
- { .fc_id = 641, .cpu_id = 138, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC4_BMON_SPMU" },
- { .fc_id = 642, .cpu_id = 139, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC4_KERNEL_ERR" },
- { .fc_id = 643, .cpu_id = 140, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC5_BMON_SPMU" },
- { .fc_id = 644, .cpu_id = 141, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC5_KERNEL_ERR" },
- { .fc_id = 645, .cpu_id = 150, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC6_BMON_SPMU" },
- { .fc_id = 646, .cpu_id = 151, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC6_KERNEL_ERR" },
- { .fc_id = 647, .cpu_id = 152, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC7_BMON_SPMU" },
- { .fc_id = 648, .cpu_id = 153, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC7_KERNEL_ERR" },
- { .fc_id = 649, .cpu_id = 146, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC8_BMON_SPMU" },
- { .fc_id = 650, .cpu_id = 147, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC8_KERNEL_ERR" },
- { .fc_id = 651, .cpu_id = 148, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC9_BMON_SPMU" },
- { .fc_id = 652, .cpu_id = 149, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC9_KERNEL_ERR" },
- { .fc_id = 653, .cpu_id = 142, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC10_BMON_SPMU" },
- { .fc_id = 654, .cpu_id = 143, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC10_KERNEL_ERR" },
- { .fc_id = 655, .cpu_id = 144, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC11_BMON_SPMU" },
- { .fc_id = 656, .cpu_id = 145, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC11_KERNEL_ERR" },
- { .fc_id = 657, .cpu_id = 162, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC12_BMON_SPMU" },
- { .fc_id = 658, .cpu_id = 163, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC12_KERNEL_ERR" },
- { .fc_id = 659, .cpu_id = 164, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC13_BMON_SPMU" },
- { .fc_id = 660, .cpu_id = 165, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC13_KERNEL_ERR" },
- { .fc_id = 661, .cpu_id = 158, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC14_BMON_SPMU" },
- { .fc_id = 662, .cpu_id = 159, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC14_KERNEL_ERR" },
- { .fc_id = 663, .cpu_id = 160, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC15_BMON_SPMU" },
- { .fc_id = 664, .cpu_id = 161, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC15_KERNEL_ERR" },
- { .fc_id = 665, .cpu_id = 154, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC16_BMON_SPMU" },
- { .fc_id = 666, .cpu_id = 155, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC16_KERNEL_ERR" },
- { .fc_id = 667, .cpu_id = 156, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC17_BMON_SPMU" },
- { .fc_id = 668, .cpu_id = 157, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC17_KERNEL_ERR" },
- { .fc_id = 669, .cpu_id = 166, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC18_BMON_SPMU" },
- { .fc_id = 670, .cpu_id = 167, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC18_KERNEL_ERR" },
- { .fc_id = 671, .cpu_id = 168, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC19_BMON_SPMU" },
- { .fc_id = 672, .cpu_id = 169, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC19_KERNEL_ERR" },
- { .fc_id = 673, .cpu_id = 170, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC20_BMON_SPMU" },
- { .fc_id = 674, .cpu_id = 171, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC20_KERNEL_ERR" },
- { .fc_id = 675, .cpu_id = 172, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC21_BMON_SPMU" },
- { .fc_id = 676, .cpu_id = 173, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC21_KERNEL_ERR" },
- { .fc_id = 677, .cpu_id = 174, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC22_BMON_SPMU" },
- { .fc_id = 678, .cpu_id = 175, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC22_KERNEL_ERR" },
- { .fc_id = 679, .cpu_id = 176, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC23_BMON_SPMU" },
- { .fc_id = 680, .cpu_id = 177, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC23_KERNEL_ERR" },
- { .fc_id = 681, .cpu_id = 178, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC24_BMON_SPMU" },
- { .fc_id = 682, .cpu_id = 179, .valid = 1,
- .msg = 0, .reset = 0, .name = "TPC24_KERNEL_ERR" },
- { .fc_id = 683, .cpu_id = 180, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 684, .cpu_id = 180, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 685, .cpu_id = 180, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 686, .cpu_id = 180, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 687, .cpu_id = 180, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 688, .cpu_id = 180, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME0_CTRL_BMON_SPMU" },
- { .fc_id = 689, .cpu_id = 180, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME0_SBTE_BMON_SPMU" },
- { .fc_id = 690, .cpu_id = 180, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME0_WAP_BMON_SPMU" },
- { .fc_id = 691, .cpu_id = 180, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME0_WAP_SOURCE_RESULT_INVALID" },
- { .fc_id = 692, .cpu_id = 181, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 693, .cpu_id = 181, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 694, .cpu_id = 181, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 695, .cpu_id = 181, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 696, .cpu_id = 181, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 697, .cpu_id = 181, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME1_CTRL_BMON_SPMU" },
- { .fc_id = 698, .cpu_id = 181, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME1_SBTE_BMON_SPMU" },
- { .fc_id = 699, .cpu_id = 181, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME1_WAP_BMON_SPMU" },
- { .fc_id = 700, .cpu_id = 181, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME1_WAP_SOURCE_RESULT_INVALID" },
- { .fc_id = 701, .cpu_id = 182, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 702, .cpu_id = 182, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 703, .cpu_id = 182, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 704, .cpu_id = 182, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 705, .cpu_id = 182, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 706, .cpu_id = 182, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME2_CTRL_BMON_SPMU" },
- { .fc_id = 707, .cpu_id = 182, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME2_SBTE_BMON_SPMU" },
- { .fc_id = 708, .cpu_id = 182, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME2_WAP_BMON_SPMU" },
- { .fc_id = 709, .cpu_id = 182, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME2_WAP_SOURCE_RESULT_INVALID" },
- { .fc_id = 710, .cpu_id = 183, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 711, .cpu_id = 183, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 712, .cpu_id = 183, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 713, .cpu_id = 183, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 714, .cpu_id = 183, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 715, .cpu_id = 183, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME3_CTRL_BMON_SPMU" },
- { .fc_id = 716, .cpu_id = 183, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME3_SBTE_BMON_SPMU" },
- { .fc_id = 717, .cpu_id = 183, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME3_WAP_BMON_SPMU" },
- { .fc_id = 718, .cpu_id = 183, .valid = 1,
- .msg = 0, .reset = 0, .name = "MME3_WAP_SOURCE_RESULT_INVALID" },
- { .fc_id = 719, .cpu_id = 184, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 720, .cpu_id = 184, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU0_PAGE_FAULT_OR_WR_PERM" },
- { .fc_id = 721, .cpu_id = 184, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU0_SECURITY_ERROR" },
- { .fc_id = 722, .cpu_id = 185, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 723, .cpu_id = 185, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU1_PAGE_FAULT_WR_PERM" },
- { .fc_id = 724, .cpu_id = 185, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU1_SECURITY_ERROR" },
- { .fc_id = 725, .cpu_id = 186, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 726, .cpu_id = 186, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU2_PAGE_FAULT_WR_PERM" },
- { .fc_id = 727, .cpu_id = 186, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU2_SECURITY_ERROR" },
- { .fc_id = 728, .cpu_id = 187, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 729, .cpu_id = 187, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU3_PAGE_FAULT_WR_PERM" },
- { .fc_id = 730, .cpu_id = 187, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU3_SECURITY_ERROR" },
- { .fc_id = 731, .cpu_id = 188, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 732, .cpu_id = 188, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU8_PAGE_FAULT_WR_PERM" },
- { .fc_id = 733, .cpu_id = 188, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU8_SECURITY_ERROR" },
- { .fc_id = 734, .cpu_id = 189, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 735, .cpu_id = 189, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU9_PAGE_FAULT_WR_PERM" },
- { .fc_id = 736, .cpu_id = 189, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU9_SECURITY_ERROR" },
- { .fc_id = 737, .cpu_id = 190, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 738, .cpu_id = 190, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU10_PAGE_FAULT_WR_PERM" },
- { .fc_id = 739, .cpu_id = 190, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU10_SECURITY_ERROR" },
- { .fc_id = 740, .cpu_id = 191, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 741, .cpu_id = 191, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU11_PAGE_FAULT_WR_PERM" },
- { .fc_id = 742, .cpu_id = 191, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU11_SECURITY_ERROR" },
- { .fc_id = 743, .cpu_id = 192, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 744, .cpu_id = 192, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU7_PAGE_FAULT_WR_PERM" },
- { .fc_id = 745, .cpu_id = 192, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU7_SECURITY_ERROR" },
- { .fc_id = 746, .cpu_id = 193, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 747, .cpu_id = 193, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU6_PAGE_FAULT_WR_PERM" },
- { .fc_id = 748, .cpu_id = 193, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU6_SECURITY_ERROR" },
- { .fc_id = 749, .cpu_id = 194, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 750, .cpu_id = 194, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU5_PAGE_FAULT_WR_PERM" },
- { .fc_id = 751, .cpu_id = 194, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU5_SECURITY_ERROR" },
- { .fc_id = 752, .cpu_id = 195, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 753, .cpu_id = 195, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU4_PAGE_FAULT_WR_PERM" },
- { .fc_id = 754, .cpu_id = 195, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU4_SECURITY_ERROR" },
- { .fc_id = 755, .cpu_id = 196, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 756, .cpu_id = 196, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU15_PAGE_FAULT_WR_PERM" },
- { .fc_id = 757, .cpu_id = 196, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU15_SECURITY_ERROR" },
- { .fc_id = 758, .cpu_id = 197, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 759, .cpu_id = 197, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU14_PAGE_FAULT_WR_PERM" },
- { .fc_id = 760, .cpu_id = 197, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU14_SECURITY_ERROR" },
- { .fc_id = 761, .cpu_id = 198, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 762, .cpu_id = 198, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU13_PAGE_FAULT_WR_PERM" },
- { .fc_id = 763, .cpu_id = 198, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU13_SECURITY_ERROR" },
- { .fc_id = 764, .cpu_id = 199, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 765, .cpu_id = 199, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU12_PAGE_FAULT_WR_PERM" },
- { .fc_id = 766, .cpu_id = 199, .valid = 1,
- .msg = 0, .reset = 1, .name = "HMMU12_SECURITY_ERROR" },
- { .fc_id = 767, .cpu_id = 200, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 768, .cpu_id = 201, .valid = 1,
- .msg = 0, .reset = 1, .name = "PMMU0_PAGE_FAULT_WR_PERM" },
- { .fc_id = 769, .cpu_id = 202, .valid = 1,
- .msg = 0, .reset = 1, .name = "PMMU0_SECURITY_ERROR" },
- { .fc_id = 770, .cpu_id = 203, .valid = 1,
- .msg = 0, .reset = 0, .name = "HDMA2_BM_SPMU" },
- { .fc_id = 771, .cpu_id = 204, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 772, .cpu_id = 205, .valid = 1,
- .msg = 0, .reset = 0, .name = "HDMA3_BM_SPMU" },
- { .fc_id = 773, .cpu_id = 206, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 774, .cpu_id = 207, .valid = 1,
- .msg = 0, .reset = 0, .name = "HDMA0_BM_SPMU" },
- { .fc_id = 775, .cpu_id = 208, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 776, .cpu_id = 209, .valid = 1,
- .msg = 0, .reset = 0, .name = "HDMA1_BM_SPMU" },
- { .fc_id = 777, .cpu_id = 210, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 778, .cpu_id = 211, .valid = 1,
- .msg = 0, .reset = 0, .name = "HDMA6_BM_SPMU" },
- { .fc_id = 779, .cpu_id = 212, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 780, .cpu_id = 213, .valid = 1,
- .msg = 0, .reset = 0, .name = "HDMA7_BM_SPMU" },
- { .fc_id = 781, .cpu_id = 214, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 782, .cpu_id = 215, .valid = 1,
- .msg = 0, .reset = 0, .name = "HDMA4_BM_SPMU" },
- { .fc_id = 783, .cpu_id = 216, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 784, .cpu_id = 217, .valid = 1,
- .msg = 0, .reset = 0, .name = "HDMA5_BM_SPMU" },
- { .fc_id = 785, .cpu_id = 218, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 786, .cpu_id = 219, .valid = 1,
- .msg = 0, .reset = 0, .name = "KDMA_BM_SPMU" },
- { .fc_id = 787, .cpu_id = 220, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 788, .cpu_id = 221, .valid = 1,
- .msg = 0, .reset = 0, .name = "PDMA0_BM_SPMU" },
- { .fc_id = 789, .cpu_id = 222, .valid = 1,
- .msg = 0, .reset = 0, .name = "PDMA1_BM_SPMU" },
- { .fc_id = 790, .cpu_id = 223, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM0_MC0_SPI" },
- { .fc_id = 791, .cpu_id = 224, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM0_MC1_SPI" },
- { .fc_id = 792, .cpu_id = 225, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM1_MC0_SPI" },
- { .fc_id = 793, .cpu_id = 226, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM1_MC1_SPI" },
- { .fc_id = 794, .cpu_id = 227, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM2_MC0_SPI" },
- { .fc_id = 795, .cpu_id = 228, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM2_MC1_SPI" },
- { .fc_id = 796, .cpu_id = 229, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM3_MC0_SPI" },
- { .fc_id = 797, .cpu_id = 230, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM3_MC1_SPI" },
- { .fc_id = 798, .cpu_id = 231, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM4_MC0_SPI" },
- { .fc_id = 799, .cpu_id = 232, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM4_MC1_SPI" },
- { .fc_id = 800, .cpu_id = 233, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM5_MC0_SPI" },
- { .fc_id = 801, .cpu_id = 234, .valid = 1,
- .msg = 0, .reset = 0, .name = "HBM5_MC1_SPI" },
- { .fc_id = 802, .cpu_id = 235, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 803, .cpu_id = 236, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 804, .cpu_id = 237, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 805, .cpu_id = 238, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 806, .cpu_id = 239, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 807, .cpu_id = 240, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 808, .cpu_id = 241, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 809, .cpu_id = 242, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 810, .cpu_id = 243, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 811, .cpu_id = 244, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 812, .cpu_id = 245, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 813, .cpu_id = 246, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 814, .cpu_id = 247, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 815, .cpu_id = 248, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 816, .cpu_id = 249, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 817, .cpu_id = 250, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 818, .cpu_id = 251, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 819, .cpu_id = 252, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 820, .cpu_id = 253, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 821, .cpu_id = 254, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 822, .cpu_id = 255, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 823, .cpu_id = 256, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 824, .cpu_id = 257, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 825, .cpu_id = 258, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 826, .cpu_id = 259, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 827, .cpu_id = 260, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 828, .cpu_id = 261, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 829, .cpu_id = 262, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 830, .cpu_id = 263, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 831, .cpu_id = 264, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 832, .cpu_id = 265, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 833, .cpu_id = 266, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 834, .cpu_id = 267, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 835, .cpu_id = 268, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 836, .cpu_id = 269, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 837, .cpu_id = 270, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 838, .cpu_id = 271, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 839, .cpu_id = 272, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 840, .cpu_id = 273, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 841, .cpu_id = 274, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 842, .cpu_id = 275, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 843, .cpu_id = 276, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 844, .cpu_id = 277, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 845, .cpu_id = 278, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 846, .cpu_id = 279, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 847, .cpu_id = 280, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 848, .cpu_id = 281, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 849, .cpu_id = 282, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 850, .cpu_id = 283, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 851, .cpu_id = 284, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 852, .cpu_id = 285, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 853, .cpu_id = 286, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 854, .cpu_id = 287, .valid = 0,
- .msg = 0, .reset = 1, .name = "" },
- { .fc_id = 855, .cpu_id = 288, .valid = 0,
- .msg = 0, .reset = 1, .name = "" },
- { .fc_id = 856, .cpu_id = 289, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 857, .cpu_id = 290, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 858, .cpu_id = 291, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 859, .cpu_id = 292, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 860, .cpu_id = 293, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 861, .cpu_id = 294, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 862, .cpu_id = 295, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 863, .cpu_id = 296, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 864, .cpu_id = 297, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 865, .cpu_id = 298, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 866, .cpu_id = 299, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 867, .cpu_id = 300, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 868, .cpu_id = 301, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 869, .cpu_id = 302, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 870, .cpu_id = 303, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 871, .cpu_id = 304, .valid = 1,
- .msg = 0, .reset = 1, .name = "RPM_ERROR_OR_DRAIN" },
- { .fc_id = 872, .cpu_id = 305, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 873, .cpu_id = 306, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 874, .cpu_id = 307, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 875, .cpu_id = 308, .valid = 1,
- .msg = 0, .reset = 0, .name = "RAZWI_OR_PID_MIN_MAX_INTERRUPT" },
- { .fc_id = 876, .cpu_id = 309, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 877, .cpu_id = 310, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 878, .cpu_id = 311, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 879, .cpu_id = 312, .valid = 0,
- .msg = 0, .reset = 1, .name = "" },
- { .fc_id = 880, .cpu_id = 313, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 881, .cpu_id = 314, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 882, .cpu_id = 315, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 883, .cpu_id = 316, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 884, .cpu_id = 317, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 885, .cpu_id = 318, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 886, .cpu_id = 319, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 887, .cpu_id = 320, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 888, .cpu_id = 321, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 889, .cpu_id = 322, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 890, .cpu_id = 323, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 891, .cpu_id = 324, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 892, .cpu_id = 325, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 893, .cpu_id = 326, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 894, .cpu_id = 327, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 895, .cpu_id = 328, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 896, .cpu_id = 329, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC0_SPI" },
- { .fc_id = 897, .cpu_id = 329, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC0_BMON_SPMU" },
- { .fc_id = 898, .cpu_id = 330, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC1_SPI" },
- { .fc_id = 899, .cpu_id = 330, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC1_BMON_SPMU" },
- { .fc_id = 900, .cpu_id = 331, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC2_SPI" },
- { .fc_id = 901, .cpu_id = 331, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC2_BMON_SPMU" },
- { .fc_id = 902, .cpu_id = 332, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC3_SPI" },
- { .fc_id = 903, .cpu_id = 332, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC3_BMON_SPMU" },
- { .fc_id = 904, .cpu_id = 333, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC4_SPI" },
- { .fc_id = 905, .cpu_id = 333, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC4_BMON_SPMU" },
- { .fc_id = 906, .cpu_id = 334, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC5_SPI" },
- { .fc_id = 907, .cpu_id = 334, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC5_BMON_SPMU" },
- { .fc_id = 908, .cpu_id = 335, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC6_SPI" },
- { .fc_id = 909, .cpu_id = 335, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC6_BMON_SPMU" },
- { .fc_id = 910, .cpu_id = 336, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC7_SPI" },
- { .fc_id = 911, .cpu_id = 336, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC7_BMON_SPMU" },
- { .fc_id = 912, .cpu_id = 337, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC8_SPI" },
- { .fc_id = 913, .cpu_id = 337, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC8_BMON_SPMU" },
- { .fc_id = 914, .cpu_id = 338, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC9_SPI" },
- { .fc_id = 915, .cpu_id = 338, .valid = 1,
- .msg = 0, .reset = 0, .name = "DEC9_BMON_SPMU" },
- { .fc_id = 916, .cpu_id = 339, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 917, .cpu_id = 340, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 918, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 919, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 920, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 921, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 922, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 923, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 924, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 925, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 926, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 927, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 928, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 929, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 930, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 931, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 932, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 933, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 934, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 935, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 936, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 937, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 938, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 939, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 940, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 941, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 942, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 943, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 944, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 945, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 946, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 947, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 948, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 949, .cpu_id = 341, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 950, .cpu_id = 342, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 951, .cpu_id = 343, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC0_BMON_SPMU" },
- { .fc_id = 952, .cpu_id = 343, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC0_SW_ERROR" },
- { .fc_id = 953, .cpu_id = 343, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 954, .cpu_id = 343, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 955, .cpu_id = 344, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC1_BMON_SPMU" },
- { .fc_id = 956, .cpu_id = 344, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC1_SW_ERROR" },
- { .fc_id = 957, .cpu_id = 344, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 958, .cpu_id = 344, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 959, .cpu_id = 345, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC2_BMON_SPMU" },
- { .fc_id = 960, .cpu_id = 345, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC2_SW_ERROR" },
- { .fc_id = 961, .cpu_id = 345, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 962, .cpu_id = 345, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 963, .cpu_id = 346, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC3_BMON_SPMU" },
- { .fc_id = 964, .cpu_id = 346, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC3_SW_ERROR" },
- { .fc_id = 965, .cpu_id = 346, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 966, .cpu_id = 346, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 967, .cpu_id = 347, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC4_BMON_SPMU" },
- { .fc_id = 968, .cpu_id = 347, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC4_SW_ERROR" },
- { .fc_id = 969, .cpu_id = 347, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 970, .cpu_id = 347, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 971, .cpu_id = 348, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC5_BMON_SPMU" },
- { .fc_id = 972, .cpu_id = 348, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC5_SW_ERROR" },
- { .fc_id = 973, .cpu_id = 348, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 974, .cpu_id = 348, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 975, .cpu_id = 349, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC6_BMON_SPMU" },
- { .fc_id = 976, .cpu_id = 349, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC6_SW_ERROR" },
- { .fc_id = 977, .cpu_id = 349, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 978, .cpu_id = 349, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 979, .cpu_id = 350, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC7_BMON_SPMU" },
- { .fc_id = 980, .cpu_id = 350, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC7_SW_ERROR" },
- { .fc_id = 981, .cpu_id = 350, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 982, .cpu_id = 350, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 983, .cpu_id = 351, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC8_BMON_SPMU" },
- { .fc_id = 984, .cpu_id = 351, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC8_SW_ERROR" },
- { .fc_id = 985, .cpu_id = 351, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 986, .cpu_id = 351, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 987, .cpu_id = 352, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC9_BMON_SPMU" },
- { .fc_id = 988, .cpu_id = 352, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC9_SW_ERROR" },
- { .fc_id = 989, .cpu_id = 352, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 990, .cpu_id = 352, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 991, .cpu_id = 353, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC10_BMON_SPMU" },
- { .fc_id = 992, .cpu_id = 353, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC10_SW_ERROR" },
- { .fc_id = 993, .cpu_id = 353, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 994, .cpu_id = 353, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 995, .cpu_id = 354, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC11_BMON_SPMU" },
- { .fc_id = 996, .cpu_id = 354, .valid = 1,
- .msg = 0, .reset = 0, .name = "NIC11_SW_ERROR" },
- { .fc_id = 997, .cpu_id = 354, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 998, .cpu_id = 354, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 999, .cpu_id = 355, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1000, .cpu_id = 356, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1001, .cpu_id = 357, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1002, .cpu_id = 358, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1003, .cpu_id = 359, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1004, .cpu_id = 360, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1005, .cpu_id = 361, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1006, .cpu_id = 362, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1007, .cpu_id = 363, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1008, .cpu_id = 368, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1009, .cpu_id = 369, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1010, .cpu_id = 366, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1011, .cpu_id = 367, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1012, .cpu_id = 364, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1013, .cpu_id = 365, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1014, .cpu_id = 374, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1015, .cpu_id = 375, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1016, .cpu_id = 372, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1017, .cpu_id = 373, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1018, .cpu_id = 370, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1019, .cpu_id = 371, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1020, .cpu_id = 376, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1021, .cpu_id = 377, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1022, .cpu_id = 378, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1023, .cpu_id = 379, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1024, .cpu_id = 380, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1025, .cpu_id = 381, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1026, .cpu_id = 382, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1027, .cpu_id = 383, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1028, .cpu_id = 384, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1029, .cpu_id = 385, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1030, .cpu_id = 386, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1031, .cpu_id = 387, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1032, .cpu_id = 388, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1033, .cpu_id = 389, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1034, .cpu_id = 390, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1035, .cpu_id = 391, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1036, .cpu_id = 392, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1037, .cpu_id = 393, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1038, .cpu_id = 394, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1039, .cpu_id = 395, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1040, .cpu_id = 396, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1041, .cpu_id = 397, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1042, .cpu_id = 398, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1043, .cpu_id = 399, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1044, .cpu_id = 400, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1045, .cpu_id = 401, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1046, .cpu_id = 402, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1047, .cpu_id = 403, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1048, .cpu_id = 404, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1049, .cpu_id = 405, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1050, .cpu_id = 406, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1051, .cpu_id = 407, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1052, .cpu_id = 408, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1053, .cpu_id = 409, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1054, .cpu_id = 410, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1055, .cpu_id = 411, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1056, .cpu_id = 412, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1057, .cpu_id = 413, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1058, .cpu_id = 414, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1059, .cpu_id = 414, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1060, .cpu_id = 414, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1061, .cpu_id = 414, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1062, .cpu_id = 414, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1063, .cpu_id = 414, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1064, .cpu_id = 414, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1065, .cpu_id = 414, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1066, .cpu_id = 414, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1067, .cpu_id = 414, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1068, .cpu_id = 415, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1069, .cpu_id = 416, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1070, .cpu_id = 416, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1071, .cpu_id = 416, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1072, .cpu_id = 416, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1073, .cpu_id = 416, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1074, .cpu_id = 416, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1075, .cpu_id = 416, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1076, .cpu_id = 416, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1077, .cpu_id = 416, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1078, .cpu_id = 416, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1079, .cpu_id = 416, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1080, .cpu_id = 416, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1081, .cpu_id = 416, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1082, .cpu_id = 416, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1083, .cpu_id = 416, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1084, .cpu_id = 416, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1085, .cpu_id = 417, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1086, .cpu_id = 417, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1087, .cpu_id = 417, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1088, .cpu_id = 417, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1089, .cpu_id = 417, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1090, .cpu_id = 417, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1091, .cpu_id = 417, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1092, .cpu_id = 417, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1093, .cpu_id = 417, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1094, .cpu_id = 417, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1095, .cpu_id = 417, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1096, .cpu_id = 417, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1097, .cpu_id = 417, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1098, .cpu_id = 417, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1099, .cpu_id = 417, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1100, .cpu_id = 417, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1101, .cpu_id = 418, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1102, .cpu_id = 419, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1103, .cpu_id = 420, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1104, .cpu_id = 421, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1105, .cpu_id = 422, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1106, .cpu_id = 422, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1107, .cpu_id = 422, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1108, .cpu_id = 422, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1109, .cpu_id = 422, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1110, .cpu_id = 422, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1111, .cpu_id = 422, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1112, .cpu_id = 422, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1113, .cpu_id = 422, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1114, .cpu_id = 422, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1115, .cpu_id = 422, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1116, .cpu_id = 422, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1117, .cpu_id = 423, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1118, .cpu_id = 424, .valid = 1,
- .msg = 0, .reset = 0, .name = "ROTATOR0_SERR" },
- { .fc_id = 1119, .cpu_id = 425, .valid = 1,
- .msg = 0, .reset = 0, .name = "ROTATOR1_SERR" },
- { .fc_id = 1120, .cpu_id = 426, .valid = 1,
- .msg = 0, .reset = 1, .name = "ROTATOR0_DERR" },
- { .fc_id = 1121, .cpu_id = 427, .valid = 1,
- .msg = 0, .reset = 1, .name = "ROTATOR1_DERR" },
- { .fc_id = 1122, .cpu_id = 428, .valid = 1,
- .msg = 0, .reset = 1, .name = "ROTATOR0_AXI_ERROR_RESPONSE" },
- { .fc_id = 1123, .cpu_id = 429, .valid = 1,
- .msg = 0, .reset = 1, .name = "ROTATOR1_AXI_ERROR_RESPONSE" },
- { .fc_id = 1124, .cpu_id = 430, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1125, .cpu_id = 431, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1126, .cpu_id = 432, .valid = 1,
- .msg = 0, .reset = 0, .name = "ROTATOR0_BMON_SPMU" },
- { .fc_id = 1127, .cpu_id = 433, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1128, .cpu_id = 434, .valid = 1,
- .msg = 0, .reset = 0, .name = "ROTATOR1_BMON_SPMU" },
- { .fc_id = 1129, .cpu_id = 435, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1130, .cpu_id = 436, .valid = 1,
- .msg = 0, .reset = 0, .name = "SM0_BMON_SPMU" },
- { .fc_id = 1131, .cpu_id = 437, .valid = 1,
- .msg = 0, .reset = 0, .name = "SM1_BMON_SPMU" },
- { .fc_id = 1132, .cpu_id = 438, .valid = 1,
- .msg = 0, .reset = 0, .name = "SM2_BMON_SPMU" },
- { .fc_id = 1133, .cpu_id = 439, .valid = 1,
- .msg = 0, .reset = 0, .name = "SM3_BMON_SPMU" },
- { .fc_id = 1134, .cpu_id = 440, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1135, .cpu_id = 441, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1136, .cpu_id = 442, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1137, .cpu_id = 443, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1138, .cpu_id = 444, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1139, .cpu_id = 445, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1140, .cpu_id = 446, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1141, .cpu_id = 447, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1142, .cpu_id = 448, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1143, .cpu_id = 449, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1144, .cpu_id = 450, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1145, .cpu_id = 451, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1146, .cpu_id = 452, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1147, .cpu_id = 453, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1148, .cpu_id = 454, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1149, .cpu_id = 455, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1150, .cpu_id = 456, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1151, .cpu_id = 457, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1152, .cpu_id = 458, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1153, .cpu_id = 459, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1154, .cpu_id = 460, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1155, .cpu_id = 461, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1156, .cpu_id = 462, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1157, .cpu_id = 463, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1158, .cpu_id = 464, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1159, .cpu_id = 465, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1160, .cpu_id = 466, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1161, .cpu_id = 467, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1162, .cpu_id = 468, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1163, .cpu_id = 469, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1164, .cpu_id = 470, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1165, .cpu_id = 471, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1166, .cpu_id = 472, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1167, .cpu_id = 473, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1168, .cpu_id = 474, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1169, .cpu_id = 475, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1170, .cpu_id = 476, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1171, .cpu_id = 477, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1172, .cpu_id = 478, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1173, .cpu_id = 479, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1174, .cpu_id = 480, .valid = 1,
- .msg = 1, .reset = 0, .name = "PSOC_DMA_QM" },
- { .fc_id = 1175, .cpu_id = 481, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1176, .cpu_id = 482, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1177, .cpu_id = 483, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1178, .cpu_id = 484, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1179, .cpu_id = 485, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1180, .cpu_id = 486, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1181, .cpu_id = 487, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1182, .cpu_id = 488, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1183, .cpu_id = 489, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1184, .cpu_id = 490, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1185, .cpu_id = 491, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1186, .cpu_id = 492, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1187, .cpu_id = 493, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1188, .cpu_id = 494, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1189, .cpu_id = 495, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1190, .cpu_id = 496, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1191, .cpu_id = 497, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1192, .cpu_id = 498, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1193, .cpu_id = 499, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1194, .cpu_id = 500, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1195, .cpu_id = 501, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1196, .cpu_id = 502, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1197, .cpu_id = 503, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1198, .cpu_id = 504, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1199, .cpu_id = 505, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1200, .cpu_id = 506, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1201, .cpu_id = 507, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1202, .cpu_id = 508, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1203, .cpu_id = 509, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1204, .cpu_id = 510, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1205, .cpu_id = 511, .valid = 0,
- .msg = 0, .reset = 0, .name = "" },
- { .fc_id = 1206, .cpu_id = 512, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC0_QM" },
- { .fc_id = 1207, .cpu_id = 513, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC1_QM" },
- { .fc_id = 1208, .cpu_id = 514, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC2_QM" },
- { .fc_id = 1209, .cpu_id = 515, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC3_QM" },
- { .fc_id = 1210, .cpu_id = 516, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC4_QM" },
- { .fc_id = 1211, .cpu_id = 517, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC5_QM" },
- { .fc_id = 1212, .cpu_id = 518, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC6_QM" },
- { .fc_id = 1213, .cpu_id = 519, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC7_QM" },
- { .fc_id = 1214, .cpu_id = 520, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC8_QM" },
- { .fc_id = 1215, .cpu_id = 521, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC9_QM" },
- { .fc_id = 1216, .cpu_id = 522, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC10_QM" },
- { .fc_id = 1217, .cpu_id = 523, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC11_QM" },
- { .fc_id = 1218, .cpu_id = 524, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC12_QM" },
- { .fc_id = 1219, .cpu_id = 525, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC13_QM" },
- { .fc_id = 1220, .cpu_id = 526, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC14_QM" },
- { .fc_id = 1221, .cpu_id = 527, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC15_QM" },
- { .fc_id = 1222, .cpu_id = 528, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC16_QM" },
- { .fc_id = 1223, .cpu_id = 529, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC17_QM" },
- { .fc_id = 1224, .cpu_id = 530, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC18_QM" },
- { .fc_id = 1225, .cpu_id = 531, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC19_QM" },
- { .fc_id = 1226, .cpu_id = 532, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC20_QM" },
- { .fc_id = 1227, .cpu_id = 533, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC21_QM" },
- { .fc_id = 1228, .cpu_id = 534, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC22_QM" },
- { .fc_id = 1229, .cpu_id = 535, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC23_QM" },
- { .fc_id = 1230, .cpu_id = 536, .valid = 1,
- .msg = 1, .reset = 0, .name = "TPC24_QM" },
- { .fc_id = 1231, .cpu_id = 537, .valid = 0,
- .msg = 1, .reset = 0, .name = "" },
- { .fc_id = 1232, .cpu_id = 538, .valid = 1,
- .msg = 1, .reset = 0, .name = "MME0_QM" },
- { .fc_id = 1233, .cpu_id = 539, .valid = 1,
- .msg = 1, .reset = 0, .name = "MME1_QM" },
- { .fc_id = 1234, .cpu_id = 540, .valid = 1,
- .msg = 1, .reset = 0, .name = "MME2_QM" },
- { .fc_id = 1235, .cpu_id = 541, .valid = 1,
- .msg = 1, .reset = 0, .name = "MME3_QM" },
- { .fc_id = 1236, .cpu_id = 542, .valid = 1,
- .msg = 1, .reset = 0, .name = "HDMA2_QM" },
- { .fc_id = 1237, .cpu_id = 543, .valid = 1,
- .msg = 1, .reset = 0, .name = "HDMA3_QM" },
- { .fc_id = 1238, .cpu_id = 544, .valid = 1,
- .msg = 1, .reset = 0, .name = "HDMA0_QM" },
- { .fc_id = 1239, .cpu_id = 545, .valid = 1,
- .msg = 1, .reset = 0, .name = "HDMA1_QM" },
- { .fc_id = 1240, .cpu_id = 546, .valid = 1,
- .msg = 1, .reset = 0, .name = "HDMA6_QM" },
- { .fc_id = 1241, .cpu_id = 547, .valid = 1,
- .msg = 1, .reset = 0, .name = "HDMA7_QM" },
- { .fc_id = 1242, .cpu_id = 548, .valid = 1,
- .msg = 1, .reset = 0, .name = "HDMA4_QM" },
- { .fc_id = 1243, .cpu_id = 549, .valid = 1,
- .msg = 1, .reset = 0, .name = "HDMA5_QM" },
- { .fc_id = 1244, .cpu_id = 550, .valid = 1,
- .msg = 1, .reset = 0, .name = "PDMA0_QM" },
- { .fc_id = 1245, .cpu_id = 551, .valid = 1,
- .msg = 1, .reset = 0, .name = "PDMA1_QM" },
- { .fc_id = 1246, .cpu_id = 552, .valid = 1,
- .msg = 1, .reset = 0, .name = "PI_UPDATE" },
- { .fc_id = 1247, .cpu_id = 553, .valid = 1,
- .msg = 1, .reset = 0, .name = "HALT_MACHINE" },
- { .fc_id = 1248, .cpu_id = 554, .valid = 1,
- .msg = 1, .reset = 0, .name = "INTS_REGISTER" },
- { .fc_id = 1249, .cpu_id = 555, .valid = 1,
- .msg = 1, .reset = 0, .name = "ROT0_QM" },
- { .fc_id = 1250, .cpu_id = 556, .valid = 1,
- .msg = 1, .reset = 0, .name = "ROT1_QM" },
- { .fc_id = 1251, .cpu_id = 557, .valid = 1,
- .msg = 1, .reset = 0, .name = "SOFT_RESET" },
- { .fc_id = 1252, .cpu_id = 558, .valid = 1,
- .msg = 1, .reset = 0, .name = "CPLD_SHUTDOWN_CAUSE" },
- { .fc_id = 1253, .cpu_id = 559, .valid = 1,
- .msg = 1, .reset = 0, .name = "FIX_POWER_ENV_S" },
- { .fc_id = 1254, .cpu_id = 560, .valid = 1,
- .msg = 1, .reset = 0, .name = "FIX_POWER_ENV_E" },
- { .fc_id = 1255, .cpu_id = 561, .valid = 1,
- .msg = 1, .reset = 0, .name = "FIX_THERMAL_ENV_S" },
- { .fc_id = 1256, .cpu_id = 562, .valid = 1,
- .msg = 1, .reset = 0, .name = "FIX_THERMAL_ENV_E" },
- { .fc_id = 1257, .cpu_id = 563, .valid = 1,
- .msg = 1, .reset = 0, .name = "CPLD_SHUTDOWN_EVENT" },
- { .fc_id = 1258, .cpu_id = 564, .valid = 1,
- .msg = 1, .reset = 0, .name = "PKT_QUEUE_OUT_SYNC" },
- { .fc_id = 1259, .cpu_id = 565, .valid = 1,
- .msg = 1, .reset = 0, .name = "HDMA2_CORE" },
- { .fc_id = 1260, .cpu_id = 566, .valid = 1,
- .msg = 1, .reset = 0, .name = "HDMA3_CORE" },
- { .fc_id = 1261, .cpu_id = 567, .valid = 1,
- .msg = 1, .reset = 0, .name = "HDMA0_CORE" },
- { .fc_id = 1262, .cpu_id = 568, .valid = 1,
- .msg = 1, .reset = 0, .name = "HDMA1_CORE" },
- { .fc_id = 1263, .cpu_id = 569, .valid = 1,
- .msg = 1, .reset = 0, .name = "HDMA6_CORE" },
- { .fc_id = 1264, .cpu_id = 570, .valid = 1,
- .msg = 1, .reset = 0, .name = "HDMA7_CORE" },
- { .fc_id = 1265, .cpu_id = 571, .valid = 1,
- .msg = 1, .reset = 0, .name = "HDMA4_CORE" },
- { .fc_id = 1266, .cpu_id = 572, .valid = 1,
- .msg = 1, .reset = 0, .name = "HDMA5_CORE" },
- { .fc_id = 1267, .cpu_id = 573, .valid = 1,
- .msg = 1, .reset = 0, .name = "PDMA0_CORE" },
- { .fc_id = 1268, .cpu_id = 574, .valid = 1,
- .msg = 1, .reset = 0, .name = "PDMA1_CORE" },
- { .fc_id = 1269, .cpu_id = 575, .valid = 1,
- .msg = 1, .reset = 0, .name = "KDMA0_CORE" },
- { .fc_id = 1270, .cpu_id = 576, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC0_QM0" },
- { .fc_id = 1271, .cpu_id = 577, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC0_QM1" },
- { .fc_id = 1272, .cpu_id = 578, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC1_QM0" },
- { .fc_id = 1273, .cpu_id = 579, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC1_QM1" },
- { .fc_id = 1274, .cpu_id = 580, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC2_QM0" },
- { .fc_id = 1275, .cpu_id = 581, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC2_QM1" },
- { .fc_id = 1276, .cpu_id = 582, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC3_QM0" },
- { .fc_id = 1277, .cpu_id = 583, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC3_QM1" },
- { .fc_id = 1278, .cpu_id = 584, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC4_QM0" },
- { .fc_id = 1279, .cpu_id = 585, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC4_QM1" },
- { .fc_id = 1280, .cpu_id = 586, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC5_QM0" },
- { .fc_id = 1281, .cpu_id = 587, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC5_QM1" },
- { .fc_id = 1282, .cpu_id = 588, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC6_QM0" },
- { .fc_id = 1283, .cpu_id = 589, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC6_QM1" },
- { .fc_id = 1284, .cpu_id = 590, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC7_QM0" },
- { .fc_id = 1285, .cpu_id = 591, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC7_QM1" },
- { .fc_id = 1286, .cpu_id = 592, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC8_QM0" },
- { .fc_id = 1287, .cpu_id = 593, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC8_QM1" },
- { .fc_id = 1288, .cpu_id = 594, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC9_QM0" },
- { .fc_id = 1289, .cpu_id = 595, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC9_QM1" },
- { .fc_id = 1290, .cpu_id = 596, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC10_QM0" },
- { .fc_id = 1291, .cpu_id = 597, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC10_QM1" },
- { .fc_id = 1292, .cpu_id = 598, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC11_QM0" },
- { .fc_id = 1293, .cpu_id = 599, .valid = 1,
- .msg = 1, .reset = 0, .name = "NIC11_QM1" },
- { .fc_id = 1294, .cpu_id = 600, .valid = 1,
- .msg = 1, .reset = 0, .name = "CPU_PKT_SANITY_FAILED" },
- { .fc_id = 1295, .cpu_id = 601, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC0_ENG0" },
- { .fc_id = 1296, .cpu_id = 602, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC0_ENG1" },
- { .fc_id = 1297, .cpu_id = 603, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC1_ENG0" },
- { .fc_id = 1298, .cpu_id = 604, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC1_ENG1" },
- { .fc_id = 1299, .cpu_id = 605, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC2_ENG0" },
- { .fc_id = 1300, .cpu_id = 606, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC2_ENG1" },
- { .fc_id = 1301, .cpu_id = 607, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC3_ENG0" },
- { .fc_id = 1302, .cpu_id = 608, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC3_ENG1" },
- { .fc_id = 1303, .cpu_id = 609, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC4_ENG0" },
- { .fc_id = 1304, .cpu_id = 610, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC4_ENG1" },
- { .fc_id = 1305, .cpu_id = 611, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC5_ENG0" },
- { .fc_id = 1306, .cpu_id = 612, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC5_ENG1" },
- { .fc_id = 1307, .cpu_id = 613, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC6_ENG0" },
- { .fc_id = 1308, .cpu_id = 614, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC6_ENG1" },
- { .fc_id = 1309, .cpu_id = 615, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC7_ENG0" },
- { .fc_id = 1310, .cpu_id = 616, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC7_ENG1" },
- { .fc_id = 1311, .cpu_id = 617, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC8_ENG0" },
- { .fc_id = 1312, .cpu_id = 618, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC8_ENG1" },
- { .fc_id = 1313, .cpu_id = 619, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC9_ENG0" },
- { .fc_id = 1314, .cpu_id = 620, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC9_ENG1" },
- { .fc_id = 1315, .cpu_id = 621, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC10_ENG0" },
- { .fc_id = 1316, .cpu_id = 622, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC10_ENG1" },
- { .fc_id = 1317, .cpu_id = 623, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC11_ENG0" },
- { .fc_id = 1318, .cpu_id = 624, .valid = 1,
- .msg = 1, .reset = 0, .name = "STATUS_NIC11_ENG1" },
- { .fc_id = 1319, .cpu_id = 625, .valid = 1,
- .msg = 1, .reset = 0, .name = "ARC_DCCM_FULL" },
- { .fc_id = 1320, .cpu_id = 626, .valid = 1,
- .msg = 1, .reset = 1, .name = "FP32_NOT_SUPPORTED" },
- { .fc_id = 1321, .cpu_id = 627, .valid = 1,
- .msg = 1, .reset = 1, .name = "DEV_RESET_REQ" },
+ { .fc_id = 0, .cpu_id = 0, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1, .cpu_id = 1, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 2, .cpu_id = 2, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 3, .cpu_id = 3, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 4, .cpu_id = 4, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 5, .cpu_id = 5, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 6, .cpu_id = 6, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 7, .cpu_id = 7, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 8, .cpu_id = 8, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 9, .cpu_id = 9, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 10, .cpu_id = 10, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 11, .cpu_id = 11, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 12, .cpu_id = 12, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 13, .cpu_id = 13, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 14, .cpu_id = 14, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 15, .cpu_id = 15, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 16, .cpu_id = 16, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 17, .cpu_id = 17, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 18, .cpu_id = 18, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 19, .cpu_id = 19, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 20, .cpu_id = 20, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 21, .cpu_id = 21, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 22, .cpu_id = 22, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 23, .cpu_id = 23, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 24, .cpu_id = 24, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 25, .cpu_id = 25, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 26, .cpu_id = 26, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 27, .cpu_id = 27, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 28, .cpu_id = 28, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 29, .cpu_id = 29, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 30, .cpu_id = 30, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 31, .cpu_id = 31, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 32, .cpu_id = 32, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PCIE_CORE_SERR" },
+ { .fc_id = 33, .cpu_id = 33, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PCIE_CORE_DERR" },
+ { .fc_id = 34, .cpu_id = 34, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PCIE_IF_SERR" },
+ { .fc_id = 35, .cpu_id = 35, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PCIE_IF_DERR" },
+ { .fc_id = 36, .cpu_id = 36, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PCIE_PHY_SERR" },
+ { .fc_id = 37, .cpu_id = 37, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PCIE_PHY_DERR" },
+ { .fc_id = 38, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC0_ECC_SERR" },
+ { .fc_id = 39, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC1_ECC_SERR" },
+ { .fc_id = 40, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC2_ECC_SERR" },
+ { .fc_id = 41, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC3_ECC_SERR" },
+ { .fc_id = 42, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC4_ECC_SERR" },
+ { .fc_id = 43, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC5_ECC_SERR" },
+ { .fc_id = 44, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC6_ECC_SERR" },
+ { .fc_id = 45, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC7_ECC_SERR" },
+ { .fc_id = 46, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC8_ECC_SERR" },
+ { .fc_id = 47, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC9_ECC_SERR" },
+ { .fc_id = 48, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC10_ECC_SERR" },
+ { .fc_id = 49, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC11_ECC_SERR" },
+ { .fc_id = 50, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC12_ECC_SERR" },
+ { .fc_id = 51, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC13_ECC_SERR" },
+ { .fc_id = 52, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC14_ECC_SERR" },
+ { .fc_id = 53, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC15_ECC_SERR" },
+ { .fc_id = 54, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC16_ECC_SERR" },
+ { .fc_id = 55, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC17_ECC_SERR" },
+ { .fc_id = 56, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC18_ECC_SERR" },
+ { .fc_id = 57, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC19_ECC_SERR" },
+ { .fc_id = 58, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC20_ECC_SERR" },
+ { .fc_id = 59, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC21_ECC_SERR" },
+ { .fc_id = 60, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC22_ECC_SERR" },
+ { .fc_id = 61, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC23_ECC_SERR" },
+ { .fc_id = 62, .cpu_id = 38, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC24_ECC_SERR" },
+ { .fc_id = 63, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC0_ECC_DERR" },
+ { .fc_id = 64, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC1_ECC_DERR" },
+ { .fc_id = 65, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC2_ECC_DERR" },
+ { .fc_id = 66, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC3_ECC_DERR" },
+ { .fc_id = 67, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC4_ECC_DERR" },
+ { .fc_id = 68, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC5_ECC_DERR" },
+ { .fc_id = 69, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC6_ECC_DERR" },
+ { .fc_id = 70, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC7_ECC_DERR" },
+ { .fc_id = 71, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC8_ECC_DERR" },
+ { .fc_id = 72, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC9_ECC_DERR" },
+ { .fc_id = 73, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC10_ECC_DERR" },
+ { .fc_id = 74, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC11_ECC_DERR" },
+ { .fc_id = 75, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC12_ECC_DERR" },
+ { .fc_id = 76, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC13_ECC_DERR" },
+ { .fc_id = 77, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC14_ECC_DERR" },
+ { .fc_id = 78, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC15_ECC_DERR" },
+ { .fc_id = 79, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC16_ECC_DERR" },
+ { .fc_id = 80, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC17_ECC_DERR" },
+ { .fc_id = 81, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC18_ECC_DERR" },
+ { .fc_id = 82, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC19_ECC_DERR" },
+ { .fc_id = 83, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC20_ECC_DERR" },
+ { .fc_id = 84, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC21_ECC_DERR" },
+ { .fc_id = 85, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC22_ECC_DERR" },
+ { .fc_id = 86, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC23_ECC_DERR" },
+ { .fc_id = 87, .cpu_id = 39, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "TPC24_ECC_DERR" },
+ { .fc_id = 88, .cpu_id = 40, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME0_SBTE0_ECC_SERR" },
+ { .fc_id = 89, .cpu_id = 40, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME0_SBTE1_ECC_SERR" },
+ { .fc_id = 90, .cpu_id = 40, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME0_SBTE2_ECC_SERR" },
+ { .fc_id = 91, .cpu_id = 40, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME0_SBTE3_ECC_SERR" },
+ { .fc_id = 92, .cpu_id = 40, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME0_SBTE4_ECC_SERR" },
+ { .fc_id = 93, .cpu_id = 40, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME0_CTRL_ECC_SERR" },
+ { .fc_id = 94, .cpu_id = 40, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME0_WAP_ECC_SERR" },
+ { .fc_id = 95, .cpu_id = 41, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME1_SBTE0_ECC_SERR" },
+ { .fc_id = 96, .cpu_id = 41, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME1_SBTE1_ECC_SERR" },
+ { .fc_id = 97, .cpu_id = 41, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME1_SBTE2_ECC_SERR" },
+ { .fc_id = 98, .cpu_id = 41, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME1_SBTE3_ECC_SERR" },
+ { .fc_id = 99, .cpu_id = 41, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME1_SBTE4_ECC_SERR" },
+ { .fc_id = 100, .cpu_id = 41, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME1_CTRL_ECC_SERR" },
+ { .fc_id = 101, .cpu_id = 41, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME1_WAP_ECC_SERR" },
+ { .fc_id = 102, .cpu_id = 42, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME2_SBTE0_ECC_SERR" },
+ { .fc_id = 103, .cpu_id = 42, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME2_SBTE1_ECC_SERR" },
+ { .fc_id = 104, .cpu_id = 42, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME2_SBTE2_ECC_SERR" },
+ { .fc_id = 105, .cpu_id = 42, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME2_SBTE3_ECC_SERR" },
+ { .fc_id = 106, .cpu_id = 42, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME2_SBTE4_ECC_SERR" },
+ { .fc_id = 107, .cpu_id = 42, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME2_CTRL_ECC_SERR" },
+ { .fc_id = 108, .cpu_id = 42, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME2_WAP_ECC_SERR" },
+ { .fc_id = 109, .cpu_id = 43, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME3_SBTE0_ECC_SERR" },
+ { .fc_id = 110, .cpu_id = 43, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME3_SBTE1_ECC_SERR" },
+ { .fc_id = 111, .cpu_id = 43, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME3_SBTE2_ECC_SERR" },
+ { .fc_id = 112, .cpu_id = 43, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME3_SBTE3_ECC_SERR" },
+ { .fc_id = 113, .cpu_id = 43, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME3_SBTE4_ECC_SERR" },
+ { .fc_id = 114, .cpu_id = 43, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME3_CTRL_ECC_SERR" },
+ { .fc_id = 115, .cpu_id = 43, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME3_WAP_ECC_SERR" },
+ { .fc_id = 116, .cpu_id = 44, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME0_SBTE0_ECC_DERR" },
+ { .fc_id = 117, .cpu_id = 44, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME0_SBTE1_ECC_DERR" },
+ { .fc_id = 118, .cpu_id = 44, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME0_SBTE2_ECC_DERR" },
+ { .fc_id = 119, .cpu_id = 44, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME0_SBTE3_ECC_DERR" },
+ { .fc_id = 120, .cpu_id = 44, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME0_SBTE4_ECC_DERR" },
+ { .fc_id = 121, .cpu_id = 44, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME0_CTRL_ECC_DERR" },
+ { .fc_id = 122, .cpu_id = 44, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME0_WAP_ECC_DERR" },
+ { .fc_id = 123, .cpu_id = 45, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME1_SBTE0_ECC_DERR" },
+ { .fc_id = 124, .cpu_id = 45, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME1_SBTE1_ECC_DERR" },
+ { .fc_id = 125, .cpu_id = 45, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME1_SBTE2_ECC_DERR" },
+ { .fc_id = 126, .cpu_id = 45, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME1_SBTE3_ECC_DERR" },
+ { .fc_id = 127, .cpu_id = 45, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME1_SBTE4_ECC_DERR" },
+ { .fc_id = 128, .cpu_id = 45, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME1_CTRL_ECC_DERR" },
+ { .fc_id = 129, .cpu_id = 45, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME1_WAP_ECC_DERR" },
+ { .fc_id = 130, .cpu_id = 46, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME2_SBTE0_ECC_DERR" },
+ { .fc_id = 131, .cpu_id = 46, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME2_SBTE1_ECC_DERR" },
+ { .fc_id = 132, .cpu_id = 46, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME2_SBTE2_ECC_DERR" },
+ { .fc_id = 133, .cpu_id = 46, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME2_SBTE3_ECC_DERR" },
+ { .fc_id = 134, .cpu_id = 46, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME2_SBTE4_ECC_DERR" },
+ { .fc_id = 135, .cpu_id = 46, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME2_CTRL_ECC_DERR" },
+ { .fc_id = 136, .cpu_id = 46, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME2_WAP_ECC_DERR" },
+ { .fc_id = 137, .cpu_id = 47, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME3_SBTE0_ECC_DERR" },
+ { .fc_id = 138, .cpu_id = 47, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME3_SBTE1_ECC_DERR" },
+ { .fc_id = 139, .cpu_id = 47, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME3_SBTE2_ECC_DERR" },
+ { .fc_id = 140, .cpu_id = 47, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME3_SBTE3_ECC_DERR" },
+ { .fc_id = 141, .cpu_id = 47, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME3_SBTE4_ECC_DERR" },
+ { .fc_id = 142, .cpu_id = 47, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME3_CTRL_ECC_DERR" },
+ { .fc_id = 143, .cpu_id = 47, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "MME3_WAP_ECC_DERR" },
+ { .fc_id = 144, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA2_ECC_SERR" },
+ { .fc_id = 145, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA3_ECC_SERR" },
+ { .fc_id = 146, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA0_ECC_SERR" },
+ { .fc_id = 147, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA1_ECC_SERR" },
+ { .fc_id = 148, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA6_ECC_SERR" },
+ { .fc_id = 149, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA7_ECC_SERR" },
+ { .fc_id = 150, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HDMA4_ECC_SERR" },
+ { .fc_id = 151, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HDMA5_ECC_SERR" },
+ { .fc_id = 152, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "EDMA2_ECC_DERR" },
+ { .fc_id = 153, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "EDMA3_ECC_DERR" },
+ { .fc_id = 154, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "EDMA0_ECC_DERR" },
+ { .fc_id = 155, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "EDMA1_ECC_DERR" },
+ { .fc_id = 156, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "EDMA6_ECC_DERR" },
+ { .fc_id = 157, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "EDMA7_ECC_DERR" },
+ { .fc_id = 158, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "EDMA4_ECC_DERR" },
+ { .fc_id = 159, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "EDMA5_ECC_DERR" },
+ { .fc_id = 160, .cpu_id = 50, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "KDMA0_ECC_SERR" },
+ { .fc_id = 161, .cpu_id = 51, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PDMA0_ECC_SERR" },
+ { .fc_id = 162, .cpu_id = 51, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PDMA1_ECC_SERR" },
+ { .fc_id = 163, .cpu_id = 52, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "KDMA0_ECC_DERR" },
+ { .fc_id = 164, .cpu_id = 53, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PDMA0_ECC_DERR" },
+ { .fc_id = 165, .cpu_id = 53, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PDMA1_ECC_DERR" },
+ { .fc_id = 166, .cpu_id = 54, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "CPU_IF_ECC_SERR" },
+ { .fc_id = 167, .cpu_id = 55, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "CPU_IF_ECC_DERR" },
+ { .fc_id = 168, .cpu_id = 56, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PSOC_MEM_SERR" },
+ { .fc_id = 169, .cpu_id = 57, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PSOC_MEM_DERR" },
+ { .fc_id = 170, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM0_ECC_SERR" },
+ { .fc_id = 171, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM1_ECC_SERR" },
+ { .fc_id = 172, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM2_ECC_SERR" },
+ { .fc_id = 173, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM3_ECC_SERR" },
+ { .fc_id = 174, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM4_ECC_SERR" },
+ { .fc_id = 175, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM5_ECC_SERR" },
+ { .fc_id = 176, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM6_ECC_SERR" },
+ { .fc_id = 177, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM7_ECC_SERR" },
+ { .fc_id = 178, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM8_ECC_SERR" },
+ { .fc_id = 179, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM9_ECC_SERR" },
+ { .fc_id = 180, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM10_ECC_SERR" },
+ { .fc_id = 181, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM11_ECC_SERR" },
+ { .fc_id = 182, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM12_ECC_SERR" },
+ { .fc_id = 183, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM13_ECC_SERR" },
+ { .fc_id = 184, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM14_ECC_SERR" },
+ { .fc_id = 185, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM15_ECC_SERR" },
+ { .fc_id = 186, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM16_ECC_SERR" },
+ { .fc_id = 187, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM17_ECC_SERR" },
+ { .fc_id = 188, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM18_ECC_SERR" },
+ { .fc_id = 189, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM19_ECC_SERR" },
+ { .fc_id = 190, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM20_ECC_SERR" },
+ { .fc_id = 191, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM21_ECC_SERR" },
+ { .fc_id = 192, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM22_ECC_SERR" },
+ { .fc_id = 193, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM23_ECC_SERR" },
+ { .fc_id = 194, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM24_ECC_SERR" },
+ { .fc_id = 195, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM25_ECC_SERR" },
+ { .fc_id = 196, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM26_ECC_SERR" },
+ { .fc_id = 197, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM27_ECC_SERR" },
+ { .fc_id = 198, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM28_ECC_SERR" },
+ { .fc_id = 199, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM29_ECC_SERR" },
+ { .fc_id = 200, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM30_ECC_SERR" },
+ { .fc_id = 201, .cpu_id = 58, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SRAM31_ECC_SERR" },
+ { .fc_id = 202, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM0_ECC_DERR" },
+ { .fc_id = 203, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM1_ECC_DERR" },
+ { .fc_id = 204, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM2_ECC_DERR" },
+ { .fc_id = 205, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM3_ECC_DERR" },
+ { .fc_id = 206, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM4_ECC_DERR" },
+ { .fc_id = 207, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM5_ECC_DERR" },
+ { .fc_id = 208, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM6_ECC_DERR" },
+ { .fc_id = 209, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM7_ECC_DERR" },
+ { .fc_id = 210, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM8_ECC_DERR" },
+ { .fc_id = 211, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM9_ECC_DERR" },
+ { .fc_id = 212, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM10_ECC_DERR" },
+ { .fc_id = 213, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM11_ECC_DERR" },
+ { .fc_id = 214, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM12_ECC_DERR" },
+ { .fc_id = 215, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM13_ECC_DERR" },
+ { .fc_id = 216, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM14_ECC_DERR" },
+ { .fc_id = 217, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM15_ECC_DERR" },
+ { .fc_id = 218, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM16_ECC_DERR" },
+ { .fc_id = 219, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM17_ECC_DERR" },
+ { .fc_id = 220, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM18_ECC_DERR" },
+ { .fc_id = 221, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM19_ECC_DERR" },
+ { .fc_id = 222, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM20_ECC_DERR" },
+ { .fc_id = 223, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM21_ECC_DERR" },
+ { .fc_id = 224, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM22_ECC_DERR" },
+ { .fc_id = 225, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM23_ECC_DERR" },
+ { .fc_id = 226, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM24_ECC_DERR" },
+ { .fc_id = 227, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM25_ECC_DERR" },
+ { .fc_id = 228, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM26_ECC_DERR" },
+ { .fc_id = 229, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM27_ECC_DERR" },
+ { .fc_id = 230, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM28_ECC_DERR" },
+ { .fc_id = 231, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM29_ECC_DERR" },
+ { .fc_id = 232, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM30_ECC_DERR" },
+ { .fc_id = 233, .cpu_id = 59, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SRAM31_ECC_DERR" },
+ { .fc_id = 234, .cpu_id = 60, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "GIC500" },
+ { .fc_id = 235, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_0_MC0_ECC_SERR" },
+ { .fc_id = 236, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_1_MC0_ECC_SERR" },
+ { .fc_id = 237, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_2_MC0_ECC_SERR" },
+ { .fc_id = 238, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_3_MC0_ECC_SERR" },
+ { .fc_id = 239, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_4_MC0_ECC_SERR" },
+ { .fc_id = 240, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_5_MC0_ECC_SERR" },
+ { .fc_id = 241, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_0_MC1_ECC_SERR" },
+ { .fc_id = 242, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_1_MC1_ECC_SERR" },
+ { .fc_id = 243, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_2_MC1_ECC_SERR" },
+ { .fc_id = 244, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_3_MC1_ECC_SERR" },
+ { .fc_id = 245, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_4_MC1_ECC_SERR" },
+ { .fc_id = 246, .cpu_id = 61, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM_5_MC1_ECC_SERR" },
+ { .fc_id = 247, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_0_MC0_ECC_DERR" },
+ { .fc_id = 248, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_1_MC0_ECC_DERR" },
+ { .fc_id = 249, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_2_MC0_ECC_DERR" },
+ { .fc_id = 250, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_3_MC0_ECC_DERR" },
+ { .fc_id = 251, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_4_MC0_ECC_DERR" },
+ { .fc_id = 252, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_5_MC0_ECC_DERR" },
+ { .fc_id = 253, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_0_MC1_ECC_DERR" },
+ { .fc_id = 254, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_1_MC1_ECC_DERR" },
+ { .fc_id = 255, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_2_MC1_ECC_DERR" },
+ { .fc_id = 256, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_3_MC1_ECC_DERR" },
+ { .fc_id = 257, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_4_MC1_ECC_DERR" },
+ { .fc_id = 258, .cpu_id = 62, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_5_MC1_ECC_DERR" },
+ { .fc_id = 259, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_0_ECC_SERR" },
+ { .fc_id = 260, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_1_ECC_SERR" },
+ { .fc_id = 261, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_2_ECC_SERR" },
+ { .fc_id = 262, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_3_ECC_SERR" },
+ { .fc_id = 263, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_8_ECC_SERR" },
+ { .fc_id = 264, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_9_ECC_SERR" },
+ { .fc_id = 265, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_10_ECC_SERR" },
+ { .fc_id = 266, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_11_ECC_SERR" },
+ { .fc_id = 267, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_7_ECC_SERR" },
+ { .fc_id = 268, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_6_ECC_SERR" },
+ { .fc_id = 269, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_5_ECC_SERR" },
+ { .fc_id = 270, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_4_ECC_SERR" },
+ { .fc_id = 271, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_15_ECC_SERR" },
+ { .fc_id = 272, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_14_ECC_SERR" },
+ { .fc_id = 273, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_13_ECC_SERR" },
+ { .fc_id = 274, .cpu_id = 63, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU_12_ECC_SERR" },
+ { .fc_id = 275, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_0_ECC_DERR" },
+ { .fc_id = 276, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_1_ECC_DERR" },
+ { .fc_id = 277, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_2_ECC_DERR" },
+ { .fc_id = 278, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_3_ECC_DERR" },
+ { .fc_id = 279, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_8_ECC_DERR" },
+ { .fc_id = 280, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_9_ECC_DERR" },
+ { .fc_id = 281, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_10_ECC_DERR" },
+ { .fc_id = 282, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_11_ECC_DERR" },
+ { .fc_id = 283, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_7_ECC_DERR" },
+ { .fc_id = 284, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_6_ECC_DERR" },
+ { .fc_id = 285, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_5_ECC_DERR" },
+ { .fc_id = 286, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_4_ECC_DERR" },
+ { .fc_id = 287, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_15_ECC_DERR" },
+ { .fc_id = 288, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_14_ECC_DERR" },
+ { .fc_id = 289, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_13_ECC_DERR" },
+ { .fc_id = 290, .cpu_id = 64, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_12_ECC_DERR" },
+ { .fc_id = 291, .cpu_id = 65, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PMMU_ECC_SERR" },
+ { .fc_id = 292, .cpu_id = 66, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PMMU_ECC_DERR" },
+ { .fc_id = 293, .cpu_id = 67, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 294, .cpu_id = 68, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 295, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC0_VCD_ECC_SERR" },
+ { .fc_id = 296, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC1_VCD_ECC_SERR" },
+ { .fc_id = 297, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC2_VCD_ECC_SERR" },
+ { .fc_id = 298, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC3_VCD_ECC_SERR" },
+ { .fc_id = 299, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC4_VCD_ECC_SERR" },
+ { .fc_id = 300, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC5_VCD_ECC_SERR" },
+ { .fc_id = 301, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC6_VCD_ECC_SERR" },
+ { .fc_id = 302, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC7_VCD_ECC_SERR" },
+ { .fc_id = 303, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC8_VCD_ECC_SERR" },
+ { .fc_id = 304, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC9_VCD_ECC_SERR" },
+ { .fc_id = 305, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC0_L2C_ECC_SERR" },
+ { .fc_id = 306, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC1_L2C_ECC_SERR" },
+ { .fc_id = 307, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC2_L2C_ECC_SERR" },
+ { .fc_id = 308, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC3_L2C_ECC_SERR" },
+ { .fc_id = 309, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC4_L2C_ECC_SERR" },
+ { .fc_id = 310, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC5_L2C_ECC_SERR" },
+ { .fc_id = 311, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC6_L2C_ECC_SERR" },
+ { .fc_id = 312, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC7_L2C_ECC_SERR" },
+ { .fc_id = 313, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC8_L2C_ECC_SERR" },
+ { .fc_id = 314, .cpu_id = 69, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC9_L2C_ECC_SERR" },
+ { .fc_id = 315, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC0_VCD_ECC_DERR" },
+ { .fc_id = 316, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC1_VCD_ECC_DERR" },
+ { .fc_id = 317, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC2_VCD_ECC_DERR" },
+ { .fc_id = 318, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC3_VCD_ECC_DERR" },
+ { .fc_id = 319, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC4_VCD_ECC_DERR" },
+ { .fc_id = 320, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC5_VCD_ECC_DERR" },
+ { .fc_id = 321, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC6_VCD_ECC_DERR" },
+ { .fc_id = 322, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC7_VCD_ECC_DERR" },
+ { .fc_id = 323, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC8_VCD_ECC_DERR" },
+ { .fc_id = 324, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC9_VCD_ECC_DERR" },
+ { .fc_id = 325, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC0_L2C_ECC_DERR" },
+ { .fc_id = 326, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC1_L2C_ECC_DERR" },
+ { .fc_id = 327, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC2_L2C_ECC_DERR" },
+ { .fc_id = 328, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC3_L2C_ECC_DERR" },
+ { .fc_id = 329, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC4_L2C_ECC_DERR" },
+ { .fc_id = 330, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC5_L2C_ECC_DERR" },
+ { .fc_id = 331, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC6_L2C_ECC_DERR" },
+ { .fc_id = 332, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC7_L2C_ECC_DERR" },
+ { .fc_id = 333, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC8_L2C_ECC_DERR" },
+ { .fc_id = 334, .cpu_id = 70, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEC9_L2C_ECC_DERR" },
+ { .fc_id = 335, .cpu_id = 71, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 336, .cpu_id = 72, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 337, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF0_ECC_SERR" },
+ { .fc_id = 338, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF1_ECC_SERR" },
+ { .fc_id = 339, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF2_ECC_SERR" },
+ { .fc_id = 340, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF3_ECC_SERR" },
+ { .fc_id = 341, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF8_ECC_SERR" },
+ { .fc_id = 342, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF9_ECC_SERR" },
+ { .fc_id = 343, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF10_ECC_SERR" },
+ { .fc_id = 344, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF11_ECC_SERR" },
+ { .fc_id = 345, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF7_ECC_SERR" },
+ { .fc_id = 346, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF6_ECC_SERR" },
+ { .fc_id = 347, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF5_ECC_SERR" },
+ { .fc_id = 348, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF4_ECC_SERR" },
+ { .fc_id = 349, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF15_ECC_SERR" },
+ { .fc_id = 350, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF14_ECC_SERR" },
+ { .fc_id = 351, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF13_ECC_SERR" },
+ { .fc_id = 352, .cpu_id = 73, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HIF12_ECC_SERR" },
+ { .fc_id = 353, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF0_ECC_DERR" },
+ { .fc_id = 354, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF1_ECC_DERR" },
+ { .fc_id = 355, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF2_ECC_DERR" },
+ { .fc_id = 356, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF3_ECC_DERR" },
+ { .fc_id = 357, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF8_ECC_DERR" },
+ { .fc_id = 358, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF9_ECC_DERR" },
+ { .fc_id = 359, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF10_ECC_DERR" },
+ { .fc_id = 360, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF11_ECC_DERR" },
+ { .fc_id = 361, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF7_ECC_DERR" },
+ { .fc_id = 362, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF6_ECC_DERR" },
+ { .fc_id = 363, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF5_ECC_DERR" },
+ { .fc_id = 364, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF4_ECC_DERR" },
+ { .fc_id = 365, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF15_ECC_DERR" },
+ { .fc_id = 366, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF14_ECC_DERR" },
+ { .fc_id = 367, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF13_ECC_DERR" },
+ { .fc_id = 368, .cpu_id = 74, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF12_ECC_DERR" },
+ { .fc_id = 369, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC0_ECC_SERR" },
+ { .fc_id = 370, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC1_ECC_SERR" },
+ { .fc_id = 371, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC2_ECC_SERR" },
+ { .fc_id = 372, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC3_ECC_SERR" },
+ { .fc_id = 373, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC4_ECC_SERR" },
+ { .fc_id = 374, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC5_ECC_SERR" },
+ { .fc_id = 375, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC6_ECC_SERR" },
+ { .fc_id = 376, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC7_ECC_SERR" },
+ { .fc_id = 377, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC8_ECC_SERR" },
+ { .fc_id = 378, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC9_ECC_SERR" },
+ { .fc_id = 379, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC10_ECC_SERR" },
+ { .fc_id = 380, .cpu_id = 75, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC11_ECC_SERR" },
+ { .fc_id = 381, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC0_ECC_DERR" },
+ { .fc_id = 382, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC1_ECC_DERR" },
+ { .fc_id = 383, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC2_ECC_DERR" },
+ { .fc_id = 384, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC3_ECC_DERR" },
+ { .fc_id = 385, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC4_ECC_DERR" },
+ { .fc_id = 386, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC5_ECC_DERR" },
+ { .fc_id = 387, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC6_ECC_DERR" },
+ { .fc_id = 388, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC7_ECC_DERR" },
+ { .fc_id = 389, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC8_ECC_DERR" },
+ { .fc_id = 390, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC9_ECC_DERR" },
+ { .fc_id = 391, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC10_ECC_DERR" },
+ { .fc_id = 392, .cpu_id = 76, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC11_ECC_DERR" },
+ { .fc_id = 393, .cpu_id = 77, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SM0_ECC_DERR" },
+ { .fc_id = 394, .cpu_id = 77, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SM1_ECC_DERR" },
+ { .fc_id = 395, .cpu_id = 77, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SM2_ECC_DERR" },
+ { .fc_id = 396, .cpu_id = 77, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "SM3_ECC_DERR" },
+ { .fc_id = 397, .cpu_id = 78, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SM0_ECC_SERR" },
+ { .fc_id = 398, .cpu_id = 78, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SM1_ECC_SERR" },
+ { .fc_id = 399, .cpu_id = 78, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SM2_ECC_SERR" },
+ { .fc_id = 400, .cpu_id = 78, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SM3_ECC_SERR" },
+ { .fc_id = 401, .cpu_id = 79, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "XBAR0_ECC_SERR" },
+ { .fc_id = 402, .cpu_id = 79, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "XBAR1_ECC_SERR" },
+ { .fc_id = 403, .cpu_id = 79, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "XBAR2_ECC_SERR" },
+ { .fc_id = 404, .cpu_id = 79, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "XBAR3_ECC_SERR" },
+ { .fc_id = 405, .cpu_id = 80, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "XBAR0_ECC_DERR" },
+ { .fc_id = 406, .cpu_id = 80, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "XBAR1_ECC_DERR" },
+ { .fc_id = 407, .cpu_id = 80, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "XBAR2_ECC_DERR" },
+ { .fc_id = 408, .cpu_id = 80, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "XBAR3_ECC_DERR" },
+ { .fc_id = 409, .cpu_id = 81, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "ARC0_ECC_SERR" },
+ { .fc_id = 410, .cpu_id = 82, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "ARC0_ECC_DERR" },
+ { .fc_id = 411, .cpu_id = 83, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 412, .cpu_id = 84, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PCIE_ADDR_DEC_ERR" },
+ { .fc_id = 413, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC0_AXI_ERR_RSP" },
+ { .fc_id = 414, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC1_AXI_ERR_RSP" },
+ { .fc_id = 415, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC2_AXI_ERR_RSP" },
+ { .fc_id = 416, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC3_AXI_ERR_RSP" },
+ { .fc_id = 417, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC4_AXI_ERR_RSP" },
+ { .fc_id = 418, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC5_AXI_ERR_RSP" },
+ { .fc_id = 419, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC6_AXI_ERR_RSP" },
+ { .fc_id = 420, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC7_AXI_ERR_RSP" },
+ { .fc_id = 421, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC8_AXI_ERR_RSP" },
+ { .fc_id = 422, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC9_AXI_ERR_RSP" },
+ { .fc_id = 423, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC10_AXI_ERR_RSP" },
+ { .fc_id = 424, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC11_AXI_ERR_RSP" },
+ { .fc_id = 425, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC12_AXI_ERR_RSP" },
+ { .fc_id = 426, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC13_AXI_ERR_RSP" },
+ { .fc_id = 427, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC14_AXI_ERR_RSP" },
+ { .fc_id = 428, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC15_AXI_ERR_RSP" },
+ { .fc_id = 429, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC16_AXI_ERR_RSP" },
+ { .fc_id = 430, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC17_AXI_ERR_RSP" },
+ { .fc_id = 431, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC18_AXI_ERR_RSP" },
+ { .fc_id = 432, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC19_AXI_ERR_RSP" },
+ { .fc_id = 433, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC20_AXI_ERR_RSP" },
+ { .fc_id = 434, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC21_AXI_ERR_RSP" },
+ { .fc_id = 435, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC22_AXI_ERR_RSP" },
+ { .fc_id = 436, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC23_AXI_ERR_RSP" },
+ { .fc_id = 437, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC24_AXI_ERR_RSP" },
+ { .fc_id = 438, .cpu_id = 86, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "AXI_ECC" },
+ { .fc_id = 439, .cpu_id = 87, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "L2_RAM_ECC" },
+ { .fc_id = 440, .cpu_id = 88, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME0_SBTE0_AXI_ERR_RSP" },
+ { .fc_id = 441, .cpu_id = 88, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME0_SBTE1_AXI_ERR_RSP" },
+ { .fc_id = 442, .cpu_id = 88, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME0_SBTE2_AXI_ERR_RSP" },
+ { .fc_id = 443, .cpu_id = 88, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME0_SBTE3_AXI_ERR_RSP" },
+ { .fc_id = 444, .cpu_id = 88, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME0_SBTE4_AXI_ERR_RSP" },
+ { .fc_id = 445, .cpu_id = 88, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME0_CTRL_AXI_ERROR_RESPONSE" },
+ { .fc_id = 446, .cpu_id = 88, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME0_QMAN_SW_ERROR" },
+ { .fc_id = 447, .cpu_id = 89, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME1_SBTE0_AXI_ERR_RSP" },
+ { .fc_id = 448, .cpu_id = 89, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME1_SBTE1_AXI_ERR_RSP" },
+ { .fc_id = 449, .cpu_id = 89, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME1_SBTE2_AXI_ERR_RSP" },
+ { .fc_id = 450, .cpu_id = 89, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME1_SBTE3_AXI_ERR_RSP" },
+ { .fc_id = 451, .cpu_id = 89, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME1_SBTE4_AXI_ERR_RSP" },
+ { .fc_id = 452, .cpu_id = 89, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME1_CTRL_AXI_ERROR_RESPONSE" },
+ { .fc_id = 453, .cpu_id = 89, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME1_QMAN_SW_ERROR" },
+ { .fc_id = 454, .cpu_id = 90, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME2_SBTE0_AXI_ERR_RSP" },
+ { .fc_id = 455, .cpu_id = 90, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME2_SBTE1_AXI_ERR_RSP" },
+ { .fc_id = 456, .cpu_id = 90, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME2_SBTE2_AXI_ERR_RSP" },
+ { .fc_id = 457, .cpu_id = 90, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME2_SBTE3_AXI_ERR_RSP" },
+ { .fc_id = 458, .cpu_id = 90, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME2_SBTE4_AXI_ERR_RSP" },
+ { .fc_id = 459, .cpu_id = 90, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME2_CTRL_AXI_ERROR_RESPONSE" },
+ { .fc_id = 460, .cpu_id = 90, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME2_QMAN_SW_ERROR" },
+ { .fc_id = 461, .cpu_id = 91, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME3_SBTE0_AXI_ERR_RSP" },
+ { .fc_id = 462, .cpu_id = 91, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME3_SBTE1_AXI_ERR_RSP" },
+ { .fc_id = 463, .cpu_id = 91, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME3_SBTE2_AXI_ERR_RSP" },
+ { .fc_id = 464, .cpu_id = 91, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME3_SBTE3_AXI_ERR_RSP" },
+ { .fc_id = 465, .cpu_id = 91, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME3_SBTE4_AXI_ERR_RSP" },
+ { .fc_id = 466, .cpu_id = 91, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME3_CTRL_AXI_ERROR_RESPONSE" },
+ { .fc_id = 467, .cpu_id = 91, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME3_QMAN_SW_ERROR" },
+ { .fc_id = 468, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PSOC_MME_PLL_LOCK_ERR" },
+ { .fc_id = 469, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PSOC_CPU_PLL_LOCK_ERR" },
+ { .fc_id = 470, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE3_TPC_PLL_LOCK_ERR" },
+ { .fc_id = 471, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE3_NIC_PLL_LOCK_ERR" },
+ { .fc_id = 472, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE3_XBAR_MMU_PLL_LOCK_ERR" },
+ { .fc_id = 473, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE3_XBAR_DMA_PLL_LOCK_ERR" },
+ { .fc_id = 474, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE3_XBAR_IF_PLL_LOCK_ERR" },
+ { .fc_id = 475, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE3_XBAR_BANK_PLL_LOCK_ERR" },
+ { .fc_id = 476, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE1_XBAR_MMU_PLL_LOCK_ERR" },
+ { .fc_id = 477, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE1_XBAR_DMA_PLL_LOCK_ERR" },
+ { .fc_id = 478, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE1_XBAR_IF_PLL_LOCK_ERR" },
+ { .fc_id = 479, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE1_XBAR_MESH_PLL_LOCK_ERR" },
+ { .fc_id = 480, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE1_TPC_PLL_LOCK_ERR" },
+ { .fc_id = 481, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE1_NIC_PLL_LOCK_ERR" },
+ { .fc_id = 482, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PMMU_MME_PLL_LOCK_ERR" },
+ { .fc_id = 483, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE0_TPC_PLL_LOCK_ERR" },
+ { .fc_id = 484, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE0_PCI_PLL_LOCK_ERR" },
+ { .fc_id = 485, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE0_XBAR_MMU_PLL_LOCK_ERR" },
+ { .fc_id = 486, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE0_XBAR_DMA_PLL_LOCK_ERR" },
+ { .fc_id = 487, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE0_XBAR_IF_PLL_LOCK_ERR" },
+ { .fc_id = 488, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE0_XBAR_MESH_PLL_LOCK_ERR" },
+ { .fc_id = 489, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE2_XBAR_MMU_PLL_LOCK_ERR" },
+ { .fc_id = 490, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE2_XBAR_DMA_PLL_LOCK_ERR" },
+ { .fc_id = 491, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE2_XBAR_IF_PLL_LOCK_ERR" },
+ { .fc_id = 492, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE2_XBAR_BANK_PLL_LOCK_ERR" },
+ { .fc_id = 493, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE2_TPC_PLL_LOCK_ERR" },
+ { .fc_id = 494, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PSOC_VID_PLL_LOCK_ERR" },
+ { .fc_id = 495, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PMMU_VID_PLL_LOCK_ERR" },
+ { .fc_id = 496, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE3_HBM_PLL_LOCK_ERR" },
+ { .fc_id = 497, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE1_XBAR_HBM_PLL_LOCK_ERR" },
+ { .fc_id = 498, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE1_HBM_PLL_LOCK_ERR" },
+ { .fc_id = 499, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE0_HBM_PLL_LOCK_ERR" },
+ { .fc_id = 500, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE2_XBAR_HBM_PLL_LOCK_ERR" },
+ { .fc_id = 501, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DCORE2_HBM_PLL_LOCK_ERR" },
+ { .fc_id = 502, .cpu_id = 93, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "CPU_AXI_ERR_RSP" },
+ { .fc_id = 503, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_0_AXI_ERR_RSP" },
+ { .fc_id = 504, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_1_AXI_ERR_RSP" },
+ { .fc_id = 505, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_2_AXI_ERR_RSP" },
+ { .fc_id = 506, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_3_AXI_ERR_RSP" },
+ { .fc_id = 507, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_8_AXI_ERR_RSP" },
+ { .fc_id = 508, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_9_AXI_ERR_RSP" },
+ { .fc_id = 509, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_10_AXI_ERR_RSP" },
+ { .fc_id = 510, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_11_AXI_ERR_RSP" },
+ { .fc_id = 511, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_7_AXI_ERR_RSP" },
+ { .fc_id = 512, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_6_AXI_ERR_RSP" },
+ { .fc_id = 513, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_5_AXI_ERR_RSP" },
+ { .fc_id = 514, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_4_AXI_ERR_RSP" },
+ { .fc_id = 515, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_15_AXI_ERR_RSP" },
+ { .fc_id = 516, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_14_AXI_ERR_RSP" },
+ { .fc_id = 517, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_13_AXI_ERR_RSP" },
+ { .fc_id = 518, .cpu_id = 94, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU_12_AXI_ERR_RSP" },
+ { .fc_id = 519, .cpu_id = 95, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PMMU_FATAL" },
+ { .fc_id = 520, .cpu_id = 96, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PMMU_AXI_ERR_RSP" },
+ { .fc_id = 521, .cpu_id = 97, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "VM0_ALARM_A" },
+ { .fc_id = 522, .cpu_id = 98, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "VM0_ALARM_B" },
+ { .fc_id = 523, .cpu_id = 99, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "VM1_ALARM_A" },
+ { .fc_id = 524, .cpu_id = 100, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "VM1_ALARM_B" },
+ { .fc_id = 525, .cpu_id = 101, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "VM2_ALARM_A" },
+ { .fc_id = 526, .cpu_id = 102, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "VM2_ALARM_B" },
+ { .fc_id = 527, .cpu_id = 103, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "VM3_ALARM_A" },
+ { .fc_id = 528, .cpu_id = 104, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "VM3_ALARM_B" },
+ { .fc_id = 529, .cpu_id = 105, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PSOC_AXI_ERR_RSP" },
+ { .fc_id = 530, .cpu_id = 106, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PSOC_PRSTN_FALL" },
+ { .fc_id = 531, .cpu_id = 107, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 532, .cpu_id = 107, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 533, .cpu_id = 107, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 534, .cpu_id = 107, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 535, .cpu_id = 107, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 536, .cpu_id = 107, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 537, .cpu_id = 107, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 538, .cpu_id = 107, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 539, .cpu_id = 108, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "KDMA_CH0_AXI_ERR_RSP" },
+ { .fc_id = 540, .cpu_id = 109, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "PDMA_CH0_AXI_ERR_RSP" },
+ { .fc_id = 541, .cpu_id = 109, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "PDMA_CH1_AXI_ERR_RSP" },
+ { .fc_id = 542, .cpu_id = 110, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_CATTRIP_0" },
+ { .fc_id = 543, .cpu_id = 111, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_CATTRIP_1" },
+ { .fc_id = 544, .cpu_id = 112, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_CATTRIP_2" },
+ { .fc_id = 545, .cpu_id = 113, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_CATTRIP_3" },
+ { .fc_id = 546, .cpu_id = 114, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_CATTRIP_4" },
+ { .fc_id = 547, .cpu_id = 115, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM_CATTRIP_5" },
+ { .fc_id = 548, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM0_MC0_SEI_SEVERE" },
+ { .fc_id = 549, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM0_MC0_SEI_NON_SEVERE" },
+ { .fc_id = 550, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM0_MC1_SEI_SEVERE" },
+ { .fc_id = 551, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM0_MC1_SEI_NON_SEVERE" },
+ { .fc_id = 552, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM1_MC0_SEI_SEVERE" },
+ { .fc_id = 553, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM1_MC0_SEI_NON_SEVERE" },
+ { .fc_id = 554, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM1_MC1_SEI_SEVERE" },
+ { .fc_id = 555, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM1_MC1_SEI_NON_SEVERE" },
+ { .fc_id = 556, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM2_MC0_SEI_SEVERE" },
+ { .fc_id = 557, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM2_MC0_SEI_NON_SEVERE" },
+ { .fc_id = 558, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM2_MC1_SEI_SEVERE" },
+ { .fc_id = 559, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM2_MC1_SEI_NON_SEVERE" },
+ { .fc_id = 560, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM3_MC0_SEI_SEVERE" },
+ { .fc_id = 561, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM3_MC0_SEI_NON_SEVERE" },
+ { .fc_id = 562, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM3_MC1_SEI_SEVERE" },
+ { .fc_id = 563, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM3_MC1_SEI_NON_SEVERE" },
+ { .fc_id = 564, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM4_MC0_SEI_SEVERE" },
+ { .fc_id = 565, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM4_MC0_SEI_NON_SEVERE" },
+ { .fc_id = 566, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM4_MC1_SEI_SEVERE" },
+ { .fc_id = 567, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM4_MC1_SEI_NON_SEVERE" },
+ { .fc_id = 568, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM5_MC0_SEI_SEVERE" },
+ { .fc_id = 569, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM5_MC0_SEI_NON_SEVERE" },
+ { .fc_id = 570, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HBM5_MC1_SEI_SEVERE" },
+ { .fc_id = 571, .cpu_id = 116, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM5_MC1_SEI_NON_SEVERE" },
+ { .fc_id = 572, .cpu_id = 117, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC0_AXI_ERR_RSPONSE" },
+ { .fc_id = 573, .cpu_id = 117, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC1_AXI_ERR_RSPONSE" },
+ { .fc_id = 574, .cpu_id = 117, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC2_AXI_ERR_RSPONSE" },
+ { .fc_id = 575, .cpu_id = 117, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC3_AXI_ERR_RSPONSE" },
+ { .fc_id = 576, .cpu_id = 117, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC4_AXI_ERR_RSPONSE" },
+ { .fc_id = 577, .cpu_id = 117, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC5_AXI_ERR_RSPONSE" },
+ { .fc_id = 578, .cpu_id = 117, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC6_AXI_ERR_RSPONSE" },
+ { .fc_id = 579, .cpu_id = 117, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC7_AXI_ERR_RSPONSE" },
+ { .fc_id = 580, .cpu_id = 117, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC8_AXI_ERR_RSPONSE" },
+ { .fc_id = 581, .cpu_id = 117, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC9_AXI_ERR_RSPONSE" },
+ { .fc_id = 582, .cpu_id = 118, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 583, .cpu_id = 119, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 584, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF0_FATAL" },
+ { .fc_id = 585, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF1_FATAL" },
+ { .fc_id = 586, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF2_FATAL" },
+ { .fc_id = 587, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF3_FATAL" },
+ { .fc_id = 588, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF8_FATAL" },
+ { .fc_id = 589, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF9_FATAL" },
+ { .fc_id = 590, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF10_FATAL" },
+ { .fc_id = 591, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF11_FATAL" },
+ { .fc_id = 592, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF7_FATAL" },
+ { .fc_id = 593, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF6_FATAL" },
+ { .fc_id = 594, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF5_FATAL" },
+ { .fc_id = 595, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF4_FATAL" },
+ { .fc_id = 596, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF15_FATAL" },
+ { .fc_id = 597, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF14_FATAL" },
+ { .fc_id = 598, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF13_FATAL" },
+ { .fc_id = 599, .cpu_id = 120, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HIF12_FATAL" },
+ { .fc_id = 600, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC0_AXI_ERROR_RESPONSE" },
+ { .fc_id = 601, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC1_AXI_ERROR_RESPONSE" },
+ { .fc_id = 602, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC2_AXI_ERROR_RESPONSE" },
+ { .fc_id = 603, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC3_AXI_ERROR_RESPONSE" },
+ { .fc_id = 604, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC4_AXI_ERROR_RESPONSE" },
+ { .fc_id = 605, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC5_AXI_ERROR_RESPONSE" },
+ { .fc_id = 606, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC6_AXI_ERROR_RESPONSE" },
+ { .fc_id = 607, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC7_AXI_ERROR_RESPONSE" },
+ { .fc_id = 608, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC8_AXI_ERROR_RESPONSE" },
+ { .fc_id = 609, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC9_AXI_ERROR_RESPONSE" },
+ { .fc_id = 610, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC10_AXI_ERROR_RESPONSE" },
+ { .fc_id = 611, .cpu_id = 121, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC11_AXI_ERROR_RESPONSE" },
+ { .fc_id = 612, .cpu_id = 122, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "SM0_AXI_ERROR_RESPONSE" },
+ { .fc_id = 613, .cpu_id = 122, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "SM1_AXI_ERROR_RESPONSE" },
+ { .fc_id = 614, .cpu_id = 122, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "SM2_AXI_ERROR_RESPONSE" },
+ { .fc_id = 615, .cpu_id = 122, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "SM3_AXI_ERROR_RESPONSE" },
+ { .fc_id = 616, .cpu_id = 123, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "ARC_AXI_ERROR_RESPONSE" },
+ { .fc_id = 617, .cpu_id = 124, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 618, .cpu_id = 125, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 619, .cpu_id = 125, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PCIE_FLR_REQUESTED" },
+ { .fc_id = 620, .cpu_id = 125, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 621, .cpu_id = 125, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 622, .cpu_id = 125, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PCIE_APB_TIMEOUT" },
+ { .fc_id = 623, .cpu_id = 125, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 624, .cpu_id = 125, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 625, .cpu_id = 125, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 626, .cpu_id = 125, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 627, .cpu_id = 125, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PCIE_FATAL_ERR" },
+ { .fc_id = 628, .cpu_id = 125, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 629, .cpu_id = 126, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 630, .cpu_id = 127, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 631, .cpu_id = 128, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PCIE_P2P_MSIX" },
+ { .fc_id = 632, .cpu_id = 129, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PCIE_DRAIN_COMPLETE" },
+ { .fc_id = 633, .cpu_id = 130, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC0_BMON_SPMU" },
+ { .fc_id = 634, .cpu_id = 131, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC0_KERNEL_ERR" },
+ { .fc_id = 635, .cpu_id = 132, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC1_BMON_SPMU" },
+ { .fc_id = 636, .cpu_id = 133, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC1_KERNEL_ERR" },
+ { .fc_id = 637, .cpu_id = 134, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC2_BMON_SPMU" },
+ { .fc_id = 638, .cpu_id = 135, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC2_KERNEL_ERR" },
+ { .fc_id = 639, .cpu_id = 136, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC3_BMON_SPMU" },
+ { .fc_id = 640, .cpu_id = 137, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC3_KERNEL_ERR" },
+ { .fc_id = 641, .cpu_id = 138, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC4_BMON_SPMU" },
+ { .fc_id = 642, .cpu_id = 139, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC4_KERNEL_ERR" },
+ { .fc_id = 643, .cpu_id = 140, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC5_BMON_SPMU" },
+ { .fc_id = 644, .cpu_id = 141, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC5_KERNEL_ERR" },
+ { .fc_id = 645, .cpu_id = 150, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC6_BMON_SPMU" },
+ { .fc_id = 646, .cpu_id = 151, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC6_KERNEL_ERR" },
+ { .fc_id = 647, .cpu_id = 152, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC7_BMON_SPMU" },
+ { .fc_id = 648, .cpu_id = 153, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC7_KERNEL_ERR" },
+ { .fc_id = 649, .cpu_id = 146, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC8_BMON_SPMU" },
+ { .fc_id = 650, .cpu_id = 147, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC8_KERNEL_ERR" },
+ { .fc_id = 651, .cpu_id = 148, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC9_BMON_SPMU" },
+ { .fc_id = 652, .cpu_id = 149, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC9_KERNEL_ERR" },
+ { .fc_id = 653, .cpu_id = 142, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC10_BMON_SPMU" },
+ { .fc_id = 654, .cpu_id = 143, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC10_KERNEL_ERR" },
+ { .fc_id = 655, .cpu_id = 144, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC11_BMON_SPMU" },
+ { .fc_id = 656, .cpu_id = 145, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC11_KERNEL_ERR" },
+ { .fc_id = 657, .cpu_id = 162, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC12_BMON_SPMU" },
+ { .fc_id = 658, .cpu_id = 163, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC12_KERNEL_ERR" },
+ { .fc_id = 659, .cpu_id = 164, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC13_BMON_SPMU" },
+ { .fc_id = 660, .cpu_id = 165, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC13_KERNEL_ERR" },
+ { .fc_id = 661, .cpu_id = 158, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC14_BMON_SPMU" },
+ { .fc_id = 662, .cpu_id = 159, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC14_KERNEL_ERR" },
+ { .fc_id = 663, .cpu_id = 160, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC15_BMON_SPMU" },
+ { .fc_id = 664, .cpu_id = 161, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC15_KERNEL_ERR" },
+ { .fc_id = 665, .cpu_id = 154, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC16_BMON_SPMU" },
+ { .fc_id = 666, .cpu_id = 155, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC16_KERNEL_ERR" },
+ { .fc_id = 667, .cpu_id = 156, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC17_BMON_SPMU" },
+ { .fc_id = 668, .cpu_id = 157, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC17_KERNEL_ERR" },
+ { .fc_id = 669, .cpu_id = 166, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC18_BMON_SPMU" },
+ { .fc_id = 670, .cpu_id = 167, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC18_KERNEL_ERR" },
+ { .fc_id = 671, .cpu_id = 168, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC19_BMON_SPMU" },
+ { .fc_id = 672, .cpu_id = 169, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC19_KERNEL_ERR" },
+ { .fc_id = 673, .cpu_id = 170, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC20_BMON_SPMU" },
+ { .fc_id = 674, .cpu_id = 171, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC20_KERNEL_ERR" },
+ { .fc_id = 675, .cpu_id = 172, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC21_BMON_SPMU" },
+ { .fc_id = 676, .cpu_id = 173, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC21_KERNEL_ERR" },
+ { .fc_id = 677, .cpu_id = 174, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC22_BMON_SPMU" },
+ { .fc_id = 678, .cpu_id = 175, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC22_KERNEL_ERR" },
+ { .fc_id = 679, .cpu_id = 176, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC23_BMON_SPMU" },
+ { .fc_id = 680, .cpu_id = 177, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC23_KERNEL_ERR" },
+ { .fc_id = 681, .cpu_id = 178, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "TPC24_BMON_SPMU" },
+ { .fc_id = 682, .cpu_id = 179, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC24_KERNEL_ERR" },
+ { .fc_id = 683, .cpu_id = 180, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 684, .cpu_id = 180, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 685, .cpu_id = 180, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 686, .cpu_id = 180, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 687, .cpu_id = 180, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 688, .cpu_id = 180, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME0_CTRL_BMON_SPMU" },
+ { .fc_id = 689, .cpu_id = 180, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME0_SBTE_BMON_SPMU" },
+ { .fc_id = 690, .cpu_id = 180, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME0_WAP_BMON_SPMU" },
+ { .fc_id = 691, .cpu_id = 180, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME0_WAP_SOURCE_RESULT_INVALID" },
+ { .fc_id = 692, .cpu_id = 181, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 693, .cpu_id = 181, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 694, .cpu_id = 181, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 695, .cpu_id = 181, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 696, .cpu_id = 181, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 697, .cpu_id = 181, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME1_CTRL_BMON_SPMU" },
+ { .fc_id = 698, .cpu_id = 181, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME1_SBTE_BMON_SPMU" },
+ { .fc_id = 699, .cpu_id = 181, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME1_WAP_BMON_SPMU" },
+ { .fc_id = 700, .cpu_id = 181, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME1_WAP_SOURCE_RESULT_INVALID" },
+ { .fc_id = 701, .cpu_id = 182, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 702, .cpu_id = 182, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 703, .cpu_id = 182, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 704, .cpu_id = 182, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 705, .cpu_id = 182, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 706, .cpu_id = 182, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME2_CTRL_BMON_SPMU" },
+ { .fc_id = 707, .cpu_id = 182, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME2_SBTE_BMON_SPMU" },
+ { .fc_id = 708, .cpu_id = 182, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME2_WAP_BMON_SPMU" },
+ { .fc_id = 709, .cpu_id = 182, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME2_WAP_SOURCE_RESULT_INVALID" },
+ { .fc_id = 710, .cpu_id = 183, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 711, .cpu_id = 183, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 712, .cpu_id = 183, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 713, .cpu_id = 183, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 714, .cpu_id = 183, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 715, .cpu_id = 183, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME3_CTRL_BMON_SPMU" },
+ { .fc_id = 716, .cpu_id = 183, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME3_SBTE_BMON_SPMU" },
+ { .fc_id = 717, .cpu_id = 183, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "MME3_WAP_BMON_SPMU" },
+ { .fc_id = 718, .cpu_id = 183, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME3_WAP_SOURCE_RESULT_INVALID" },
+ { .fc_id = 719, .cpu_id = 184, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 720, .cpu_id = 184, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU0_PAGE_FAULT_OR_WR_PERM" },
+ { .fc_id = 721, .cpu_id = 184, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU0_SECURITY_ERROR" },
+ { .fc_id = 722, .cpu_id = 185, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 723, .cpu_id = 185, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU1_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 724, .cpu_id = 185, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU1_SECURITY_ERROR" },
+ { .fc_id = 725, .cpu_id = 186, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 726, .cpu_id = 186, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU2_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 727, .cpu_id = 186, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU2_SECURITY_ERROR" },
+ { .fc_id = 728, .cpu_id = 187, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 729, .cpu_id = 187, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU3_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 730, .cpu_id = 187, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU3_SECURITY_ERROR" },
+ { .fc_id = 731, .cpu_id = 188, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 732, .cpu_id = 188, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU8_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 733, .cpu_id = 188, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU8_SECURITY_ERROR" },
+ { .fc_id = 734, .cpu_id = 189, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 735, .cpu_id = 189, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU9_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 736, .cpu_id = 189, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU9_SECURITY_ERROR" },
+ { .fc_id = 737, .cpu_id = 190, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 738, .cpu_id = 190, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU10_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 739, .cpu_id = 190, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU10_SECURITY_ERROR" },
+ { .fc_id = 740, .cpu_id = 191, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 741, .cpu_id = 191, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU11_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 742, .cpu_id = 191, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU11_SECURITY_ERROR" },
+ { .fc_id = 743, .cpu_id = 192, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 744, .cpu_id = 192, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU7_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 745, .cpu_id = 192, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU7_SECURITY_ERROR" },
+ { .fc_id = 746, .cpu_id = 193, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 747, .cpu_id = 193, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU6_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 748, .cpu_id = 193, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU6_SECURITY_ERROR" },
+ { .fc_id = 749, .cpu_id = 194, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 750, .cpu_id = 194, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU5_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 751, .cpu_id = 194, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU5_SECURITY_ERROR" },
+ { .fc_id = 752, .cpu_id = 195, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 753, .cpu_id = 195, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU4_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 754, .cpu_id = 195, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU4_SECURITY_ERROR" },
+ { .fc_id = 755, .cpu_id = 196, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 756, .cpu_id = 196, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU15_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 757, .cpu_id = 196, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU15_SECURITY_ERROR" },
+ { .fc_id = 758, .cpu_id = 197, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 759, .cpu_id = 197, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU14_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 760, .cpu_id = 197, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU14_SECURITY_ERROR" },
+ { .fc_id = 761, .cpu_id = 198, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 762, .cpu_id = 198, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU13_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 763, .cpu_id = 198, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU13_SECURITY_ERROR" },
+ { .fc_id = 764, .cpu_id = 199, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 765, .cpu_id = 199, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HMMU12_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 766, .cpu_id = 199, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "HMMU12_SECURITY_ERROR" },
+ { .fc_id = 767, .cpu_id = 200, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 768, .cpu_id = 201, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PMMU0_PAGE_FAULT_WR_PERM" },
+ { .fc_id = 769, .cpu_id = 202, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PMMU0_SECURITY_ERROR" },
+ { .fc_id = 770, .cpu_id = 203, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA2_BM_SPMU" },
+ { .fc_id = 771, .cpu_id = 204, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 772, .cpu_id = 205, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA3_BM_SPMU" },
+ { .fc_id = 773, .cpu_id = 206, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 774, .cpu_id = 207, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA0_BM_SPMU" },
+ { .fc_id = 775, .cpu_id = 208, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 776, .cpu_id = 209, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA1_BM_SPMU" },
+ { .fc_id = 777, .cpu_id = 210, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 778, .cpu_id = 211, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA6_BM_SPMU" },
+ { .fc_id = 779, .cpu_id = 212, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 780, .cpu_id = 213, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA7_BM_SPMU" },
+ { .fc_id = 781, .cpu_id = 214, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 782, .cpu_id = 215, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA4_BM_SPMU" },
+ { .fc_id = 783, .cpu_id = 216, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 784, .cpu_id = 217, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "EDMA5_BM_SPMU" },
+ { .fc_id = 785, .cpu_id = 218, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 786, .cpu_id = 219, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "KDMA_BM_SPMU" },
+ { .fc_id = 787, .cpu_id = 220, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 788, .cpu_id = 221, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PDMA0_BM_SPMU" },
+ { .fc_id = 789, .cpu_id = 222, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PDMA1_BM_SPMU" },
+ { .fc_id = 790, .cpu_id = 223, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM0_MC0_SPI" },
+ { .fc_id = 791, .cpu_id = 224, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM0_MC1_SPI" },
+ { .fc_id = 792, .cpu_id = 225, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM1_MC0_SPI" },
+ { .fc_id = 793, .cpu_id = 226, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM1_MC1_SPI" },
+ { .fc_id = 794, .cpu_id = 227, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM2_MC0_SPI" },
+ { .fc_id = 795, .cpu_id = 228, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM2_MC1_SPI" },
+ { .fc_id = 796, .cpu_id = 229, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM3_MC0_SPI" },
+ { .fc_id = 797, .cpu_id = 230, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM3_MC1_SPI" },
+ { .fc_id = 798, .cpu_id = 231, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM4_MC0_SPI" },
+ { .fc_id = 799, .cpu_id = 232, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM4_MC1_SPI" },
+ { .fc_id = 800, .cpu_id = 233, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM5_MC0_SPI" },
+ { .fc_id = 801, .cpu_id = 234, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HBM5_MC1_SPI" },
+ { .fc_id = 802, .cpu_id = 235, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 803, .cpu_id = 236, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 804, .cpu_id = 237, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 805, .cpu_id = 238, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 806, .cpu_id = 239, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 807, .cpu_id = 240, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 808, .cpu_id = 241, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 809, .cpu_id = 242, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 810, .cpu_id = 243, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 811, .cpu_id = 244, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 812, .cpu_id = 245, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 813, .cpu_id = 246, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 814, .cpu_id = 247, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 815, .cpu_id = 248, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 816, .cpu_id = 249, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 817, .cpu_id = 250, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 818, .cpu_id = 251, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 819, .cpu_id = 252, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 820, .cpu_id = 253, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 821, .cpu_id = 254, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 822, .cpu_id = 255, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 823, .cpu_id = 256, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 824, .cpu_id = 257, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 825, .cpu_id = 258, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 826, .cpu_id = 259, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 827, .cpu_id = 260, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 828, .cpu_id = 261, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 829, .cpu_id = 262, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 830, .cpu_id = 263, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 831, .cpu_id = 264, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 832, .cpu_id = 265, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 833, .cpu_id = 266, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 834, .cpu_id = 267, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 835, .cpu_id = 268, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 836, .cpu_id = 269, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 837, .cpu_id = 270, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 838, .cpu_id = 271, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 839, .cpu_id = 272, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 840, .cpu_id = 273, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 841, .cpu_id = 274, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 842, .cpu_id = 275, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 843, .cpu_id = 276, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 844, .cpu_id = 277, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 845, .cpu_id = 278, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 846, .cpu_id = 279, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 847, .cpu_id = 280, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 848, .cpu_id = 281, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 849, .cpu_id = 282, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 850, .cpu_id = 283, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 851, .cpu_id = 284, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 852, .cpu_id = 285, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 853, .cpu_id = 286, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 854, .cpu_id = 287, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "" },
+ { .fc_id = 855, .cpu_id = 288, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "" },
+ { .fc_id = 856, .cpu_id = 289, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 857, .cpu_id = 290, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 858, .cpu_id = 291, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 859, .cpu_id = 292, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 860, .cpu_id = 293, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 861, .cpu_id = 294, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 862, .cpu_id = 295, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 863, .cpu_id = 296, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 864, .cpu_id = 297, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 865, .cpu_id = 298, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 866, .cpu_id = 299, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 867, .cpu_id = 300, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 868, .cpu_id = 301, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 869, .cpu_id = 302, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 870, .cpu_id = 303, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 871, .cpu_id = 304, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "RPM_ERROR_OR_DRAIN" },
+ { .fc_id = 872, .cpu_id = 305, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 873, .cpu_id = 306, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 874, .cpu_id = 307, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 875, .cpu_id = 308, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "RAZWI_OR_PID_MIN_MAX_INTERRUPT" },
+ { .fc_id = 876, .cpu_id = 309, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 877, .cpu_id = 310, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 878, .cpu_id = 311, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 879, .cpu_id = 312, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "" },
+ { .fc_id = 880, .cpu_id = 313, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 881, .cpu_id = 314, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 882, .cpu_id = 315, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 883, .cpu_id = 316, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 884, .cpu_id = 317, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 885, .cpu_id = 318, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 886, .cpu_id = 319, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 887, .cpu_id = 320, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 888, .cpu_id = 321, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 889, .cpu_id = 322, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 890, .cpu_id = 323, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 891, .cpu_id = 324, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 892, .cpu_id = 325, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 893, .cpu_id = 326, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 894, .cpu_id = 327, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 895, .cpu_id = 328, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 896, .cpu_id = 329, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC0_SPI" },
+ { .fc_id = 897, .cpu_id = 329, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC0_BMON_SPMU" },
+ { .fc_id = 898, .cpu_id = 330, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC1_SPI" },
+	{ .fc_id = 899, .cpu_id = 330, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+		.name = "DEC1_BMON_SPMU" },
+ { .fc_id = 900, .cpu_id = 331, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC2_SPI" },
+ { .fc_id = 901, .cpu_id = 331, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC2_BMON_SPMU" },
+ { .fc_id = 902, .cpu_id = 332, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC3_SPI" },
+ { .fc_id = 903, .cpu_id = 332, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC3_BMON_SPMU" },
+ { .fc_id = 904, .cpu_id = 333, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC4_SPI" },
+ { .fc_id = 905, .cpu_id = 333, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC4_BMON_SPMU" },
+ { .fc_id = 906, .cpu_id = 334, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC5_SPI" },
+ { .fc_id = 907, .cpu_id = 334, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC5_BMON_SPMU" },
+ { .fc_id = 908, .cpu_id = 335, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC6_SPI" },
+ { .fc_id = 909, .cpu_id = 335, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC6_BMON_SPMU" },
+ { .fc_id = 910, .cpu_id = 336, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC7_SPI" },
+ { .fc_id = 911, .cpu_id = 336, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC7_BMON_SPMU" },
+ { .fc_id = 912, .cpu_id = 337, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC8_SPI" },
+ { .fc_id = 913, .cpu_id = 337, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC8_BMON_SPMU" },
+ { .fc_id = 914, .cpu_id = 338, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "DEC9_SPI" },
+ { .fc_id = 915, .cpu_id = 338, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "DEC9_BMON_SPMU" },
+ { .fc_id = 916, .cpu_id = 339, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 917, .cpu_id = 340, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 918, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 919, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 920, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 921, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 922, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 923, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 924, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 925, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 926, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 927, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 928, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 929, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 930, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 931, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 932, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 933, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 934, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 935, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 936, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 937, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 938, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 939, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 940, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 941, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 942, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 943, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 944, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 945, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 946, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 947, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 948, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 949, .cpu_id = 341, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 950, .cpu_id = 342, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 951, .cpu_id = 343, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC0_BMON_SPMU" },
+ { .fc_id = 952, .cpu_id = 343, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC0_SW_ERROR" },
+ { .fc_id = 953, .cpu_id = 343, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 954, .cpu_id = 343, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 955, .cpu_id = 344, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC1_BMON_SPMU" },
+ { .fc_id = 956, .cpu_id = 344, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC1_SW_ERROR" },
+ { .fc_id = 957, .cpu_id = 344, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 958, .cpu_id = 344, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 959, .cpu_id = 345, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC2_BMON_SPMU" },
+ { .fc_id = 960, .cpu_id = 345, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC2_SW_ERROR" },
+ { .fc_id = 961, .cpu_id = 345, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 962, .cpu_id = 345, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 963, .cpu_id = 346, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC3_BMON_SPMU" },
+ { .fc_id = 964, .cpu_id = 346, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC3_SW_ERROR" },
+ { .fc_id = 965, .cpu_id = 346, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 966, .cpu_id = 346, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 967, .cpu_id = 347, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC4_BMON_SPMU" },
+ { .fc_id = 968, .cpu_id = 347, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC4_SW_ERROR" },
+ { .fc_id = 969, .cpu_id = 347, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 970, .cpu_id = 347, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 971, .cpu_id = 348, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC5_BMON_SPMU" },
+ { .fc_id = 972, .cpu_id = 348, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC5_SW_ERROR" },
+ { .fc_id = 973, .cpu_id = 348, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 974, .cpu_id = 348, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 975, .cpu_id = 349, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC6_BMON_SPMU" },
+ { .fc_id = 976, .cpu_id = 349, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC6_SW_ERROR" },
+ { .fc_id = 977, .cpu_id = 349, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 978, .cpu_id = 349, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 979, .cpu_id = 350, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC7_BMON_SPMU" },
+ { .fc_id = 980, .cpu_id = 350, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC7_SW_ERROR" },
+ { .fc_id = 981, .cpu_id = 350, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 982, .cpu_id = 350, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 983, .cpu_id = 351, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC8_BMON_SPMU" },
+ { .fc_id = 984, .cpu_id = 351, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC8_SW_ERROR" },
+ { .fc_id = 985, .cpu_id = 351, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 986, .cpu_id = 351, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 987, .cpu_id = 352, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC9_BMON_SPMU" },
+ { .fc_id = 988, .cpu_id = 352, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC9_SW_ERROR" },
+ { .fc_id = 989, .cpu_id = 352, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 990, .cpu_id = 352, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 991, .cpu_id = 353, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC10_BMON_SPMU" },
+ { .fc_id = 992, .cpu_id = 353, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC10_SW_ERROR" },
+ { .fc_id = 993, .cpu_id = 353, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 994, .cpu_id = 353, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 995, .cpu_id = 354, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC11_BMON_SPMU" },
+ { .fc_id = 996, .cpu_id = 354, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "NIC11_SW_ERROR" },
+ { .fc_id = 997, .cpu_id = 354, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 998, .cpu_id = 354, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 999, .cpu_id = 355, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1000, .cpu_id = 356, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1001, .cpu_id = 357, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1002, .cpu_id = 358, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1003, .cpu_id = 359, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1004, .cpu_id = 360, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1005, .cpu_id = 361, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1006, .cpu_id = 362, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1007, .cpu_id = 363, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1008, .cpu_id = 368, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1009, .cpu_id = 369, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1010, .cpu_id = 366, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1011, .cpu_id = 367, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1012, .cpu_id = 364, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1013, .cpu_id = 365, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1014, .cpu_id = 374, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1015, .cpu_id = 375, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1016, .cpu_id = 372, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1017, .cpu_id = 373, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1018, .cpu_id = 370, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1019, .cpu_id = 371, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1020, .cpu_id = 376, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1021, .cpu_id = 377, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1022, .cpu_id = 378, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1023, .cpu_id = 379, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1024, .cpu_id = 380, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1025, .cpu_id = 381, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1026, .cpu_id = 382, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1027, .cpu_id = 383, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1028, .cpu_id = 384, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1029, .cpu_id = 385, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1030, .cpu_id = 386, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1031, .cpu_id = 387, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1032, .cpu_id = 388, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1033, .cpu_id = 389, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1034, .cpu_id = 390, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1035, .cpu_id = 391, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1036, .cpu_id = 392, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1037, .cpu_id = 393, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1038, .cpu_id = 394, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1039, .cpu_id = 395, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1040, .cpu_id = 396, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1041, .cpu_id = 397, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1042, .cpu_id = 398, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1043, .cpu_id = 399, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1044, .cpu_id = 400, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1045, .cpu_id = 401, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1046, .cpu_id = 402, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1047, .cpu_id = 403, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1048, .cpu_id = 404, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1049, .cpu_id = 405, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1050, .cpu_id = 406, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1051, .cpu_id = 407, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1052, .cpu_id = 408, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1053, .cpu_id = 409, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1054, .cpu_id = 410, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1055, .cpu_id = 411, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1056, .cpu_id = 412, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1057, .cpu_id = 413, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1058, .cpu_id = 414, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1059, .cpu_id = 414, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1060, .cpu_id = 414, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1061, .cpu_id = 414, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1062, .cpu_id = 414, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1063, .cpu_id = 414, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1064, .cpu_id = 414, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1065, .cpu_id = 414, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1066, .cpu_id = 414, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1067, .cpu_id = 414, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1068, .cpu_id = 415, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1069, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1070, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1071, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1072, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1073, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1074, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1075, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1076, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1077, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1078, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1079, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1080, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1081, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1082, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1083, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1084, .cpu_id = 416, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1085, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1086, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1087, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1088, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1089, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1090, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1091, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1092, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1093, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1094, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1095, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1096, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1097, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1098, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1099, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1100, .cpu_id = 417, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1101, .cpu_id = 418, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1102, .cpu_id = 419, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1103, .cpu_id = 420, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1104, .cpu_id = 421, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1105, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1106, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1107, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1108, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1109, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1110, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1111, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1112, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1113, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1114, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1115, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1116, .cpu_id = 422, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1117, .cpu_id = 423, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1118, .cpu_id = 424, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "ROTATOR0_SERR" },
+ { .fc_id = 1119, .cpu_id = 425, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "ROTATOR1_SERR" },
+ { .fc_id = 1120, .cpu_id = 426, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "ROTATOR0_DERR" },
+ { .fc_id = 1121, .cpu_id = 427, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "ROTATOR1_DERR" },
+ { .fc_id = 1122, .cpu_id = 428, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "ROTATOR0_AXI_ERROR_RESPONSE" },
+ { .fc_id = 1123, .cpu_id = 429, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "ROTATOR1_AXI_ERROR_RESPONSE" },
+ { .fc_id = 1124, .cpu_id = 430, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1125, .cpu_id = 431, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1126, .cpu_id = 432, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "ROTATOR0_BMON_SPMU" },
+ { .fc_id = 1127, .cpu_id = 433, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1128, .cpu_id = 434, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "ROTATOR1_BMON_SPMU" },
+ { .fc_id = 1129, .cpu_id = 435, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1130, .cpu_id = 436, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SM0_BMON_SPMU" },
+ { .fc_id = 1131, .cpu_id = 437, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SM1_BMON_SPMU" },
+ { .fc_id = 1132, .cpu_id = 438, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SM2_BMON_SPMU" },
+ { .fc_id = 1133, .cpu_id = 439, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SM3_BMON_SPMU" },
+ { .fc_id = 1134, .cpu_id = 440, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1135, .cpu_id = 441, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1136, .cpu_id = 442, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1137, .cpu_id = 443, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1138, .cpu_id = 444, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1139, .cpu_id = 445, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1140, .cpu_id = 446, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1141, .cpu_id = 447, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1142, .cpu_id = 448, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1143, .cpu_id = 449, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1144, .cpu_id = 450, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1145, .cpu_id = 451, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1146, .cpu_id = 452, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1147, .cpu_id = 453, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1148, .cpu_id = 454, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1149, .cpu_id = 455, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1150, .cpu_id = 456, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1151, .cpu_id = 457, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1152, .cpu_id = 458, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1153, .cpu_id = 459, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1154, .cpu_id = 460, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1155, .cpu_id = 461, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1156, .cpu_id = 462, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1157, .cpu_id = 463, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1158, .cpu_id = 464, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1159, .cpu_id = 465, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1160, .cpu_id = 466, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1161, .cpu_id = 467, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1162, .cpu_id = 468, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1163, .cpu_id = 469, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1164, .cpu_id = 470, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1165, .cpu_id = 471, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1166, .cpu_id = 472, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1167, .cpu_id = 473, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1168, .cpu_id = 474, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1169, .cpu_id = 475, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1170, .cpu_id = 476, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1171, .cpu_id = 477, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1172, .cpu_id = 478, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1173, .cpu_id = 479, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1174, .cpu_id = 480, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1175, .cpu_id = 481, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1176, .cpu_id = 482, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1177, .cpu_id = 483, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1178, .cpu_id = 484, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1179, .cpu_id = 485, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1180, .cpu_id = 486, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1181, .cpu_id = 487, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1182, .cpu_id = 488, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1183, .cpu_id = 489, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1184, .cpu_id = 490, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1185, .cpu_id = 491, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1186, .cpu_id = 492, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1187, .cpu_id = 493, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1188, .cpu_id = 494, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1189, .cpu_id = 495, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1190, .cpu_id = 496, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1191, .cpu_id = 497, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1192, .cpu_id = 498, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1193, .cpu_id = 499, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1194, .cpu_id = 500, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1195, .cpu_id = 501, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1196, .cpu_id = 502, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1197, .cpu_id = 503, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1198, .cpu_id = 504, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1199, .cpu_id = 505, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1200, .cpu_id = 506, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1201, .cpu_id = 507, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1202, .cpu_id = 508, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1203, .cpu_id = 509, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1204, .cpu_id = 510, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1205, .cpu_id = 511, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1206, .cpu_id = 512, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC0_QM" },
+ { .fc_id = 1207, .cpu_id = 513, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC1_QM" },
+ { .fc_id = 1208, .cpu_id = 514, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC2_QM" },
+ { .fc_id = 1209, .cpu_id = 515, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC3_QM" },
+ { .fc_id = 1210, .cpu_id = 516, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC4_QM" },
+ { .fc_id = 1211, .cpu_id = 517, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC5_QM" },
+ { .fc_id = 1212, .cpu_id = 518, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC6_QM" },
+ { .fc_id = 1213, .cpu_id = 519, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC7_QM" },
+ { .fc_id = 1214, .cpu_id = 520, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC8_QM" },
+ { .fc_id = 1215, .cpu_id = 521, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC9_QM" },
+ { .fc_id = 1216, .cpu_id = 522, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC10_QM" },
+ { .fc_id = 1217, .cpu_id = 523, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC11_QM" },
+ { .fc_id = 1218, .cpu_id = 524, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC12_QM" },
+ { .fc_id = 1219, .cpu_id = 525, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC13_QM" },
+ { .fc_id = 1220, .cpu_id = 526, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC14_QM" },
+ { .fc_id = 1221, .cpu_id = 527, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC15_QM" },
+ { .fc_id = 1222, .cpu_id = 528, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC16_QM" },
+ { .fc_id = 1223, .cpu_id = 529, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC17_QM" },
+ { .fc_id = 1224, .cpu_id = 530, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC18_QM" },
+ { .fc_id = 1225, .cpu_id = 531, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC19_QM" },
+ { .fc_id = 1226, .cpu_id = 532, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC20_QM" },
+ { .fc_id = 1227, .cpu_id = 533, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC21_QM" },
+ { .fc_id = 1228, .cpu_id = 534, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC22_QM" },
+ { .fc_id = 1229, .cpu_id = 535, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC23_QM" },
+ { .fc_id = 1230, .cpu_id = 536, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "TPC24_QM" },
+ { .fc_id = 1231, .cpu_id = 537, .valid = 0, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "" },
+ { .fc_id = 1232, .cpu_id = 538, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME0_QM" },
+ { .fc_id = 1233, .cpu_id = 539, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME1_QM" },
+ { .fc_id = 1234, .cpu_id = 540, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME2_QM" },
+ { .fc_id = 1235, .cpu_id = 541, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "MME3_QM" },
+ { .fc_id = 1236, .cpu_id = 542, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA2_QM" },
+ { .fc_id = 1237, .cpu_id = 543, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA3_QM" },
+ { .fc_id = 1238, .cpu_id = 544, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA0_QM" },
+ { .fc_id = 1239, .cpu_id = 545, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA1_QM" },
+ { .fc_id = 1240, .cpu_id = 546, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA6_QM" },
+ { .fc_id = 1241, .cpu_id = 547, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA7_QM" },
+ { .fc_id = 1242, .cpu_id = 548, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA4_QM" },
+ { .fc_id = 1243, .cpu_id = 549, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA5_QM" },
+ { .fc_id = 1244, .cpu_id = 550, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "PDMA0_QM" },
+ { .fc_id = 1245, .cpu_id = 551, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "PDMA1_QM" },
+ { .fc_id = 1246, .cpu_id = 552, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "PI_UPDATE" },
+ { .fc_id = 1247, .cpu_id = 553, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "HALT_MACHINE" },
+ { .fc_id = 1248, .cpu_id = 554, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "INTS_REGISTER" },
+ { .fc_id = 1249, .cpu_id = 555, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "ROT0_QM" },
+ { .fc_id = 1250, .cpu_id = 556, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "ROT1_QM" },
+ { .fc_id = 1251, .cpu_id = 557, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "SOFT_RESET" },
+ { .fc_id = 1252, .cpu_id = 558, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "CPLD_SHUTDOWN_CAUSE" },
+ { .fc_id = 1253, .cpu_id = 559, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "FIX_POWER_ENV_S" },
+ { .fc_id = 1254, .cpu_id = 560, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "FIX_POWER_ENV_E" },
+ { .fc_id = 1255, .cpu_id = 561, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "FIX_THERMAL_ENV_S" },
+ { .fc_id = 1256, .cpu_id = 562, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "FIX_THERMAL_ENV_E" },
+ { .fc_id = 1257, .cpu_id = 563, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "CPLD_SHUTDOWN_EVENT" },
+ { .fc_id = 1258, .cpu_id = 564, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "PKT_QUEUE_OUT_SYNC" },
+ { .fc_id = 1259, .cpu_id = 565, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA2_CORE" },
+ { .fc_id = 1260, .cpu_id = 566, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA3_CORE" },
+ { .fc_id = 1261, .cpu_id = 567, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA0_CORE" },
+ { .fc_id = 1262, .cpu_id = 568, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA1_CORE" },
+ { .fc_id = 1263, .cpu_id = 569, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA6_CORE" },
+ { .fc_id = 1264, .cpu_id = 570, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA7_CORE" },
+ { .fc_id = 1265, .cpu_id = 571, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA4_CORE" },
+ { .fc_id = 1266, .cpu_id = 572, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "EDMA5_CORE" },
+ { .fc_id = 1267, .cpu_id = 573, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "PDMA0_CORE" },
+ { .fc_id = 1268, .cpu_id = 574, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "PDMA1_CORE" },
+ { .fc_id = 1269, .cpu_id = 575, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "KDMA0_CORE" },
+ { .fc_id = 1270, .cpu_id = 576, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC0_QM0" },
+ { .fc_id = 1271, .cpu_id = 577, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC0_QM1" },
+ { .fc_id = 1272, .cpu_id = 578, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC1_QM0" },
+ { .fc_id = 1273, .cpu_id = 579, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC1_QM1" },
+ { .fc_id = 1274, .cpu_id = 580, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC2_QM0" },
+ { .fc_id = 1275, .cpu_id = 581, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC2_QM1" },
+ { .fc_id = 1276, .cpu_id = 582, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC3_QM0" },
+ { .fc_id = 1277, .cpu_id = 583, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC3_QM1" },
+ { .fc_id = 1278, .cpu_id = 584, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC4_QM0" },
+ { .fc_id = 1279, .cpu_id = 585, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC4_QM1" },
+ { .fc_id = 1280, .cpu_id = 586, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC5_QM0" },
+ { .fc_id = 1281, .cpu_id = 587, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC5_QM1" },
+ { .fc_id = 1282, .cpu_id = 588, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC6_QM0" },
+ { .fc_id = 1283, .cpu_id = 589, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC6_QM1" },
+ { .fc_id = 1284, .cpu_id = 590, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC7_QM0" },
+ { .fc_id = 1285, .cpu_id = 591, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC7_QM1" },
+ { .fc_id = 1286, .cpu_id = 592, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC8_QM0" },
+ { .fc_id = 1287, .cpu_id = 593, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC8_QM1" },
+ { .fc_id = 1288, .cpu_id = 594, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC9_QM0" },
+ { .fc_id = 1289, .cpu_id = 595, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC9_QM1" },
+ { .fc_id = 1290, .cpu_id = 596, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC10_QM0" },
+ { .fc_id = 1291, .cpu_id = 597, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC10_QM1" },
+ { .fc_id = 1292, .cpu_id = 598, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC11_QM0" },
+ { .fc_id = 1293, .cpu_id = 599, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "NIC11_QM1" },
+ { .fc_id = 1294, .cpu_id = 600, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "CPU_PKT_SANITY_FAILED" },
+ { .fc_id = 1295, .cpu_id = 601, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC0_ENG0" },
+ { .fc_id = 1296, .cpu_id = 602, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC0_ENG1" },
+ { .fc_id = 1297, .cpu_id = 603, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC1_ENG0" },
+ { .fc_id = 1298, .cpu_id = 604, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC1_ENG1" },
+ { .fc_id = 1299, .cpu_id = 605, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC2_ENG0" },
+ { .fc_id = 1300, .cpu_id = 606, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC2_ENG1" },
+ { .fc_id = 1301, .cpu_id = 607, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC3_ENG0" },
+ { .fc_id = 1302, .cpu_id = 608, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC3_ENG1" },
+ { .fc_id = 1303, .cpu_id = 609, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC4_ENG0" },
+ { .fc_id = 1304, .cpu_id = 610, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC4_ENG1" },
+ { .fc_id = 1305, .cpu_id = 611, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC5_ENG0" },
+ { .fc_id = 1306, .cpu_id = 612, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC5_ENG1" },
+ { .fc_id = 1307, .cpu_id = 613, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC6_ENG0" },
+ { .fc_id = 1308, .cpu_id = 614, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC6_ENG1" },
+ { .fc_id = 1309, .cpu_id = 615, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC7_ENG0" },
+ { .fc_id = 1310, .cpu_id = 616, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC7_ENG1" },
+ { .fc_id = 1311, .cpu_id = 617, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC8_ENG0" },
+ { .fc_id = 1312, .cpu_id = 618, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC8_ENG1" },
+ { .fc_id = 1313, .cpu_id = 619, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC9_ENG0" },
+ { .fc_id = 1314, .cpu_id = 620, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC9_ENG1" },
+ { .fc_id = 1315, .cpu_id = 621, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC10_ENG0" },
+ { .fc_id = 1316, .cpu_id = 622, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC10_ENG1" },
+ { .fc_id = 1317, .cpu_id = 623, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC11_ENG0" },
+ { .fc_id = 1318, .cpu_id = 624, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+ .name = "STATUS_NIC11_ENG1" },
+ { .fc_id = 1319, .cpu_id = 625, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
+ .name = "ARC_DCCM_FULL" },
+ { .fc_id = 1320, .cpu_id = 626, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "FP32_NOT_SUPPORTED" },
+ { .fc_id = 1321, .cpu_id = 627, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
+ .name = "DEV_RESET_REQ" },
};
#endif /* __GAUDI2_ASYNC_IDS_MAP_EVENTS_EXT_H_ */
diff --git a/drivers/accel/habanalabs/include/gaudi2/gaudi2_fw_if.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_fw_if.h
index 82f3ca2a3966..8522f24deac0 100644
--- a/drivers/accel/habanalabs/include/gaudi2/gaudi2_fw_if.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_fw_if.h
@@ -63,7 +63,10 @@ struct gaudi2_cold_rst_data {
u32 fake_sig_validation_en : 1;
u32 bist_skip_enable : 1;
u32 bist_need_iatu_config : 1;
- u32 reserved : 24;
+ u32 fake_bis_compliant : 1;
+ u32 wd_rst_cause_arm : 1;
+ u32 wd_rst_cause_arcpid : 1;
+ u32 reserved : 21;
};
__le32 data;
};
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index 10975bb603fb..a35dd0e41c27 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -536,16 +536,19 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
static struct acpi_table_header *acpi_get_pptt(void)
{
static struct acpi_table_header *pptt;
+ static bool is_pptt_checked;
acpi_status status;
/*
* PPTT will be used at runtime on every CPU hotplug in path, so we
* don't need to call acpi_put_table() to release the table mapping.
*/
- if (!pptt) {
+ if (!pptt && !is_pptt_checked) {
status = acpi_get_table(ACPI_SIG_PPTT, 0, &pptt);
if (ACPI_FAILURE(status))
acpi_pptt_warn_missing();
+
+ is_pptt_checked = true;
}
return pptt;
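
The PPTT hunk above switches from "probe whenever the cached pointer is NULL" to "probe exactly once", so a missing table is no longer re-queried (and re-warned about) on every CPU hotplug. A minimal stand-alone C sketch of that negative-caching pattern, not taken from the kernel; getenv() stands in for the expensive lookup:

    #include <stdio.h>
    #include <stdlib.h>

    static const char *table;          /* cached result, NULL until probed */
    static int table_checked;          /* set after the first probe attempt */

    static const char *get_table(void)
    {
            if (!table && !table_checked) {
                    table = getenv("PPTT");        /* stand-in for acpi_get_table() */
                    if (!table)
                            fprintf(stderr, "table missing (warned once)\n");
                    table_checked = 1;
            }
            return table;
    }

    int main(void)
    {
            get_table();
            get_table();               /* second call is silent and cheap */
            return 0;
    }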
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 1278969eec1f..4bd16b3f0781 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -263,6 +263,12 @@ static int __init acpi_processor_driver_init(void)
if (acpi_disabled)
return 0;
+ if (!cpufreq_register_notifier(&acpi_processor_notifier_block,
+ CPUFREQ_POLICY_NOTIFIER)) {
+ acpi_processor_cpufreq_init = true;
+ acpi_processor_ignore_ppc_init();
+ }
+
result = driver_register(&acpi_processor_driver);
if (result < 0)
return result;
@@ -276,12 +282,6 @@ static int __init acpi_processor_driver_init(void)
cpuhp_setup_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD, "acpi/cpu-drv:dead",
NULL, acpi_soft_cpu_dead);
- if (!cpufreq_register_notifier(&acpi_processor_notifier_block,
- CPUFREQ_POLICY_NOTIFIER)) {
- acpi_processor_cpufreq_init = true;
- acpi_processor_ignore_ppc_init();
- }
-
acpi_processor_throttling_init();
return 0;
err:
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index e534fd49a67e..b7c6287eccca 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -140,9 +140,13 @@ void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
ret = freq_qos_add_request(&policy->constraints,
&pr->thermal_req,
FREQ_QOS_MAX, INT_MAX);
- if (ret < 0)
+ if (ret < 0) {
pr_err("Failed to add freq constraint for CPU%d (%d)\n",
cpu, ret);
+ continue;
+ }
+
+ thermal_cooling_device_update(pr->cdev);
}
}
@@ -153,8 +157,12 @@ void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
for_each_cpu(cpu, policy->related_cpus) {
struct acpi_processor *pr = per_cpu(processors, cpu);
- if (pr)
- freq_qos_remove_request(&pr->thermal_req);
+ if (!pr)
+ continue;
+
+ freq_qos_remove_request(&pr->thermal_req);
+
+ thermal_cooling_device_update(pr->cdev);
}
}
#else /* ! CONFIG_CPU_FREQ */
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 7c9125df5a65..7b4801ce62d6 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -400,6 +400,13 @@ static const struct dmi_system_id medion_laptop[] = {
DMI_MATCH(DMI_BOARD_NAME, "M17T"),
},
},
+ {
+ .ident = "MEDION S17413",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+ DMI_MATCH(DMI_BOARD_NAME, "M1xA"),
+ },
+ },
{ }
};
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 710ac640267d..fd7cbce8076e 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -497,6 +497,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
{
.callback = video_detect_force_native,
+ /* Acer Aspire 3830TG */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3830TG"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
/* Acer Aspire 4810T */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -716,6 +724,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5515"),
},
},
+ {
+ .callback = video_detect_force_native,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 15 3535"),
+ },
+ },
/*
* Desktops which falsely report a backlight and which our heuristics
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index e45285d4e62a..da5727069d85 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -251,6 +251,7 @@ bool force_storage_d3(void)
#define ACPI_QUIRK_UART1_TTY_UART2_SKIP BIT(1)
#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY BIT(2)
#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY BIT(3)
+#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS BIT(4)
static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
/*
@@ -280,13 +281,35 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
*/
#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
{
+ /* Acer Iconia One 7 B1-750 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VESPA2"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ },
+ {
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ME176C"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
ACPI_QUIRK_UART1_TTY_UART2_SKIP |
- ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ },
+ {
+ /* Lenovo Yoga Book X90F/L */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
.matches = {
@@ -294,7 +317,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
- ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
/* Lenovo Yoga Tablet 2 1050F/L */
@@ -336,7 +360,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "M890BAP"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
- ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
/* Whitelabel (sold as various brands) TM800A550L */
@@ -413,6 +438,20 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
return 0;
}
EXPORT_SYMBOL_GPL(acpi_quirk_skip_serdev_enumeration);
+
+bool acpi_quirk_skip_gpio_event_handlers(void)
+{
+ const struct dmi_system_id *dmi_id;
+ long quirks;
+
+ dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
+ if (!dmi_id)
+ return false;
+
+ quirks = (unsigned long)dmi_id->driver_data;
+ return (quirks & ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS);
+}
+EXPORT_SYMBOL_GPL(acpi_quirk_skip_gpio_event_handlers);
#endif
/* Lists of PMIC ACPI HIDs with an (often better) native charger driver */
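
A hedged sketch of how a consumer (for instance the ACPI GPIO event setup path) might honour the newly exported quirk; the helper below is hypothetical and only illustrates the intended call site:

    /* Hypothetical caller: DMI-matched x86 Android tablets whose AML GPIO
     * event handlers misbehave should not get handlers installed at all.
     */
    static bool want_acpi_gpio_events(void)
    {
            return !acpi_quirk_skip_gpio_event_handlers();
    }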
diff --git a/drivers/ata/pata_parport/pata_parport.c b/drivers/ata/pata_parport/pata_parport.c
index 294a266a0dda..c1576d943b43 100644
--- a/drivers/ata/pata_parport/pata_parport.c
+++ b/drivers/ata/pata_parport/pata_parport.c
@@ -381,6 +381,7 @@ static void pata_parport_dev_release(struct device *dev)
{
struct pi_adapter *pi = container_of(dev, struct pi_adapter, dev);
+ ida_free(&pata_parport_bus_dev_ids, dev->id);
kfree(pi);
}
@@ -433,23 +434,27 @@ static struct pi_adapter *pi_init_one(struct parport *parport,
if (bus_for_each_dev(&pata_parport_bus_type, NULL, &match, pi_find_dev))
return NULL;
+ id = ida_alloc(&pata_parport_bus_dev_ids, GFP_KERNEL);
+ if (id < 0)
+ return NULL;
+
pi = kzalloc(sizeof(struct pi_adapter), GFP_KERNEL);
- if (!pi)
+ if (!pi) {
+ ida_free(&pata_parport_bus_dev_ids, id);
return NULL;
+ }
/* set up pi->dev before pi_probe_unit() so it can use dev_printk() */
pi->dev.parent = &pata_parport_bus;
pi->dev.bus = &pata_parport_bus_type;
pi->dev.driver = &pr->driver;
pi->dev.release = pata_parport_dev_release;
- id = ida_alloc(&pata_parport_bus_dev_ids, GFP_KERNEL);
- if (id < 0)
- return NULL; /* pata_parport_dev_release will do kfree(pi) */
pi->dev.id = id;
dev_set_name(&pi->dev, "pata_parport.%u", pi->dev.id);
if (device_register(&pi->dev)) {
put_device(&pi->dev);
- goto out_ida_free;
+ /* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */
+ return NULL;
}
pi->proto = pr;
@@ -464,8 +469,7 @@ static struct pi_adapter *pi_init_one(struct parport *parport,
pi->port = parport->base;
par_cb.private = pi;
- pi->pardev = parport_register_dev_model(parport, DRV_NAME, &par_cb,
- pi->dev.id);
+ pi->pardev = parport_register_dev_model(parport, DRV_NAME, &par_cb, id);
if (!pi->pardev)
goto out_module_put;
@@ -487,12 +491,13 @@ static struct pi_adapter *pi_init_one(struct parport *parport,
pi_connect(pi);
if (ata_host_activate(host, 0, NULL, 0, &pata_parport_sht))
- goto out_unreg_parport;
+ goto out_disconnect;
return pi;
-out_unreg_parport:
+out_disconnect:
pi_disconnect(pi);
+out_unreg_parport:
parport_unregister_device(pi->pardev);
if (pi->proto->release_proto)
pi->proto->release_proto(pi);
@@ -500,8 +505,7 @@ out_module_put:
module_put(pi->proto->owner);
out_unreg_dev:
device_unregister(&pi->dev);
-out_ida_free:
- ida_free(&pata_parport_bus_dev_ids, pi->dev.id);
+ /* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */
return NULL;
}
@@ -626,8 +630,7 @@ static void pi_remove_one(struct device *dev)
pi_disconnect(pi);
pi_release(pi);
device_unregister(dev);
- ida_free(&pata_parport_bus_dev_ids, dev->id);
- /* pata_parport_dev_release will do kfree(pi) */
+ /* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */
}
static ssize_t delete_device_store(struct bus_type *bus, const char *buf,
@@ -643,6 +646,7 @@ static ssize_t delete_device_store(struct bus_type *bus, const char *buf,
}
pi_remove_one(dev);
+ put_device(dev);
mutex_unlock(&pi_mutex);
return count;
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index eec0cc2144e0..e327a0229dc1 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -2909,6 +2909,7 @@ close_card_oam(struct idt77252_dev *card)
recycle_rx_pool_skb(card, &vc->rcv.rx_pool);
}
+ kfree(vc);
}
}
}
@@ -2952,6 +2953,15 @@ open_card_ubr0(struct idt77252_dev *card)
return 0;
}
+static void
+close_card_ubr0(struct idt77252_dev *card)
+{
+ struct vc_map *vc = card->vcs[0];
+
+ free_scq(card, vc->scq);
+ kfree(vc);
+}
+
static int
idt77252_dev_open(struct idt77252_dev *card)
{
@@ -3001,6 +3011,7 @@ static void idt77252_dev_close(struct atm_dev *dev)
struct idt77252_dev *card = dev->dev_data;
u32 conf;
+ close_card_ubr0(card);
close_card_oam(card);
conf = SAR_CFG_RXPTH | /* enable receive path */
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 839373451c2b..28eb59fd71ca 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1859,35 +1859,44 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
static void loop_handle_cmd(struct loop_cmd *cmd)
{
+ struct cgroup_subsys_state *cmd_blkcg_css = cmd->blkcg_css;
+ struct cgroup_subsys_state *cmd_memcg_css = cmd->memcg_css;
struct request *rq = blk_mq_rq_from_pdu(cmd);
const bool write = op_is_write(req_op(rq));
struct loop_device *lo = rq->q->queuedata;
int ret = 0;
struct mem_cgroup *old_memcg = NULL;
+ const bool use_aio = cmd->use_aio;
if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
ret = -EIO;
goto failed;
}
- if (cmd->blkcg_css)
- kthread_associate_blkcg(cmd->blkcg_css);
- if (cmd->memcg_css)
+ if (cmd_blkcg_css)
+ kthread_associate_blkcg(cmd_blkcg_css);
+ if (cmd_memcg_css)
old_memcg = set_active_memcg(
- mem_cgroup_from_css(cmd->memcg_css));
+ mem_cgroup_from_css(cmd_memcg_css));
+ /*
+ * do_req_filebacked() may call blk_mq_complete_request() synchronously
+ * or asynchronously if using aio. Hence, do not touch 'cmd' after
+ * do_req_filebacked() has returned unless we are sure that 'cmd' has
+ * not yet been completed.
+ */
ret = do_req_filebacked(lo, rq);
- if (cmd->blkcg_css)
+ if (cmd_blkcg_css)
kthread_associate_blkcg(NULL);
- if (cmd->memcg_css) {
+ if (cmd_memcg_css) {
set_active_memcg(old_memcg);
- css_put(cmd->memcg_css);
+ css_put(cmd_memcg_css);
}
failed:
/* complete non-aio request */
- if (!cmd->use_aio || ret) {
+ if (!use_aio || ret) {
if (ret == -EOPNOTSUPP)
cmd->ret = ret;
else
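
The loop hunk above is an instance of a general rule: snapshot any fields you still need before calling something that may complete and free the object. A minimal stand-alone sketch of the hazard and the fix, not taken from the driver:

    #include <stdbool.h>
    #include <stdlib.h>

    struct cmd { bool use_aio; };

    static void submit(struct cmd *c)
    {
            free(c);                        /* completion may run before we return */
    }

    static bool handle(struct cmd *c)
    {
            bool use_aio = c->use_aio;      /* snapshot while 'c' is still ours */

            submit(c);
            return use_aio;                 /* safe: 'c' is not touched again */
    }

    int main(void)
    {
            struct cmd *c = malloc(sizeof(*c));

            if (!c)
                    return 1;
            c->use_aio = true;
            return handle(c) ? 0 : 1;
    }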
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 4c601ca9552a..9e6b032c8ecc 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1413,8 +1413,7 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
case NULL_IRQ_SOFTIRQ:
switch (cmd->nq->dev->queue_mode) {
case NULL_Q_MQ:
- if (likely(!blk_should_fake_timeout(cmd->rq->q)))
- blk_mq_complete_request(cmd->rq);
+ blk_mq_complete_request(cmd->rq);
break;
case NULL_Q_BIO:
/*
@@ -1658,12 +1657,13 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
}
static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
+ const struct blk_mq_queue_data *bd)
{
- struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+ struct request *rq = bd->rq;
+ struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
struct nullb_queue *nq = hctx->driver_data;
- sector_t nr_sectors = blk_rq_sectors(bd->rq);
- sector_t sector = blk_rq_pos(bd->rq);
+ sector_t nr_sectors = blk_rq_sectors(rq);
+ sector_t sector = blk_rq_pos(rq);
const bool is_poll = hctx->type == HCTX_TYPE_POLL;
might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
@@ -1672,14 +1672,15 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cmd->timer.function = null_cmd_timer_expired;
}
- cmd->rq = bd->rq;
+ cmd->rq = rq;
cmd->error = BLK_STS_OK;
cmd->nq = nq;
- cmd->fake_timeout = should_timeout_request(bd->rq);
+ cmd->fake_timeout = should_timeout_request(rq) ||
+ blk_should_fake_timeout(rq->q);
- blk_mq_start_request(bd->rq);
+ blk_mq_start_request(rq);
- if (should_requeue_request(bd->rq)) {
+ if (should_requeue_request(rq)) {
/*
* Alternate between hitting the core BUSY path, and the
* driver driven requeue path
@@ -1687,22 +1688,20 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
nq->requeue_selection++;
if (nq->requeue_selection & 1)
return BLK_STS_RESOURCE;
- else {
- blk_mq_requeue_request(bd->rq, true);
- return BLK_STS_OK;
- }
+ blk_mq_requeue_request(rq, true);
+ return BLK_STS_OK;
}
if (is_poll) {
spin_lock(&nq->poll_lock);
- list_add_tail(&bd->rq->queuelist, &nq->poll_list);
+ list_add_tail(&rq->queuelist, &nq->poll_list);
spin_unlock(&nq->poll_lock);
return BLK_STS_OK;
}
if (cmd->fake_timeout)
return BLK_STS_OK;
- return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
+ return null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
}
static void cleanup_queue(struct nullb_queue *nq)
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index fb855da971ee..9fa821fa76b0 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -972,6 +972,8 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
print_version();
hp = mdesc_grab();
+ if (!hp)
+ return -ENODEV;
err = -ENODEV;
if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index d1d1c8d606c8..c73cc57ec547 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -715,7 +715,8 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
}
}
-static void ubq_complete_io_cmd(struct ublk_io *io, int res)
+static void ubq_complete_io_cmd(struct ublk_io *io, int res,
+ unsigned issue_flags)
{
/* mark this cmd owned by ublksrv */
io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
@@ -727,7 +728,7 @@ static void ubq_complete_io_cmd(struct ublk_io *io, int res)
io->flags &= ~UBLK_IO_FLAG_ACTIVE;
/* tell ublksrv one io request is coming */
- io_uring_cmd_done(io->cmd, res, 0);
+ io_uring_cmd_done(io->cmd, res, 0, issue_flags);
}
#define UBLK_REQUEUE_DELAY_MS 3
@@ -744,7 +745,8 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
}
-static inline void __ublk_rq_task_work(struct request *req)
+static inline void __ublk_rq_task_work(struct request *req,
+ unsigned issue_flags)
{
struct ublk_queue *ubq = req->mq_hctx->driver_data;
int tag = req->tag;
@@ -782,7 +784,7 @@ static inline void __ublk_rq_task_work(struct request *req)
pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
__func__, io->cmd->cmd_op, ubq->q_id,
req->tag, io->flags);
- ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA);
+ ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
return;
}
/*
@@ -820,17 +822,18 @@ static inline void __ublk_rq_task_work(struct request *req)
mapped_bytes >> 9;
}
- ubq_complete_io_cmd(io, UBLK_IO_RES_OK);
+ ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
}
-static inline void ublk_forward_io_cmds(struct ublk_queue *ubq)
+static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
+ unsigned issue_flags)
{
struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
struct ublk_rq_data *data, *tmp;
io_cmds = llist_reverse_order(io_cmds);
llist_for_each_entry_safe(data, tmp, io_cmds, node)
- __ublk_rq_task_work(blk_mq_rq_from_pdu(data));
+ __ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
}
static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
@@ -842,12 +845,12 @@ static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
__ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
}
-static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd)
+static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
{
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
struct ublk_queue *ubq = pdu->ubq;
- ublk_forward_io_cmds(ubq);
+ ublk_forward_io_cmds(ubq, issue_flags);
}
static void ublk_rq_task_work_fn(struct callback_head *work)
@@ -856,8 +859,9 @@ static void ublk_rq_task_work_fn(struct callback_head *work)
struct ublk_rq_data, work);
struct request *req = blk_mq_rq_from_pdu(data);
struct ublk_queue *ubq = req->mq_hctx->driver_data;
+ unsigned issue_flags = IO_URING_F_UNLOCKED;
- ublk_forward_io_cmds(ubq);
+ ublk_forward_io_cmds(ubq, issue_flags);
}
static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
@@ -1111,7 +1115,8 @@ static void ublk_cancel_queue(struct ublk_queue *ubq)
struct ublk_io *io = &ubq->ios[i];
if (io->flags & UBLK_IO_FLAG_ACTIVE)
- io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0);
+ io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0,
+ IO_URING_F_UNLOCKED);
}
/* all io commands are canceled */
@@ -1351,7 +1356,7 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
return -EIOCBQUEUED;
out:
- io_uring_cmd_done(cmd, ret, 0);
+ io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
__func__, cmd_op, tag, ret, io->flags);
return -EIOCBQUEUED;
@@ -1602,17 +1607,18 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
get_device(&ub->cdev_dev);
+ ub->dev_info.state = UBLK_S_DEV_LIVE;
ret = add_disk(disk);
if (ret) {
/*
* Has to drop the reference since ->free_disk won't be
* called in case of add_disk failure.
*/
+ ub->dev_info.state = UBLK_S_DEV_DEAD;
ublk_put_device(ub);
goto out_put_disk;
}
set_bit(UB_STATE_USED, &ub->state);
- ub->dev_info.state = UBLK_S_DEV_LIVE;
out_put_disk:
if (ret)
put_disk(disk);
@@ -2233,7 +2239,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
if (ub)
ublk_put_device(ub);
out:
- io_uring_cmd_done(cmd, ret, 0);
+ io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
__func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
return -EIOCBQUEUED;
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index bede8b005594..af774688f1c0 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -26,7 +26,14 @@
#define ECDSA_HEADER_LEN 320
#define BTINTEL_PPAG_NAME "PPAG"
-#define BTINTEL_PPAG_PREFIX "\\_SB_.PCI0.XHCI.RHUB"
+
+/* structure to store the PPAG data read from ACPI table */
+struct btintel_ppag {
+ u32 domain;
+ u32 mode;
+ acpi_status status;
+ struct hci_dev *hdev;
+};
#define CMD_WRITE_BOOT_PARAMS 0xfc0e
struct cmd_write_boot_params {
@@ -1295,17 +1302,16 @@ static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data
status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
if (ACPI_FAILURE(status)) {
- bt_dev_warn(hdev, "ACPI Failure: %s", acpi_format_exception(status));
+ bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status));
return status;
}
- if (strncmp(BTINTEL_PPAG_PREFIX, string.pointer,
- strlen(BTINTEL_PPAG_PREFIX))) {
+ len = strlen(string.pointer);
+ if (len < strlen(BTINTEL_PPAG_NAME)) {
kfree(string.pointer);
return AE_OK;
}
- len = strlen(string.pointer);
if (strncmp((char *)string.pointer + len - 4, BTINTEL_PPAG_NAME, 4)) {
kfree(string.pointer);
return AE_OK;
@@ -1314,7 +1320,8 @@ static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data
status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
if (ACPI_FAILURE(status)) {
- bt_dev_warn(hdev, "ACPI Failure: %s", acpi_format_exception(status));
+ ppag->status = status;
+ bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status));
return status;
}
@@ -1323,8 +1330,9 @@ static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data
if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) {
kfree(buffer.pointer);
- bt_dev_warn(hdev, "Invalid object type: %d or package count: %d",
+ bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d",
p->type, p->package.count);
+ ppag->status = AE_ERROR;
return AE_ERROR;
}
@@ -1335,6 +1343,7 @@ static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data
ppag->domain = (u32)p->package.elements[0].integer.value;
ppag->mode = (u32)p->package.elements[1].integer.value;
+ ppag->status = AE_OK;
kfree(buffer.pointer);
return AE_CTRL_TERMINATE;
}
@@ -2314,12 +2323,12 @@ error:
static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver)
{
- acpi_status status;
struct btintel_ppag ppag;
struct sk_buff *skb;
struct btintel_loc_aware_reg ppag_cmd;
+ acpi_handle handle;
- /* PPAG is not supported if CRF is HrP2, Jfp2, JfP1 */
+ /* PPAG is not supported if CRF is HrP2, Jfp2, JfP1 */
switch (ver->cnvr_top & 0xFFF) {
case 0x504: /* Hrp2 */
case 0x202: /* Jfp2 */
@@ -2327,29 +2336,35 @@ static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver
return;
}
+ handle = ACPI_HANDLE(GET_HCIDEV_DEV(hdev));
+ if (!handle) {
+ bt_dev_info(hdev, "No support for BT device in ACPI firmware");
+ return;
+ }
+
memset(&ppag, 0, sizeof(ppag));
ppag.hdev = hdev;
- status = acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, NULL,
- btintel_ppag_callback, &ppag, NULL);
+ ppag.status = AE_NOT_FOUND;
+ acpi_walk_namespace(ACPI_TYPE_PACKAGE, handle, 1, NULL,
+ btintel_ppag_callback, &ppag, NULL);
- if (ACPI_FAILURE(status)) {
- /* Do not log warning message if ACPI entry is not found */
- if (status == AE_NOT_FOUND)
+ if (ACPI_FAILURE(ppag.status)) {
+ if (ppag.status == AE_NOT_FOUND) {
+ bt_dev_dbg(hdev, "PPAG-BT: ACPI entry not found");
return;
- bt_dev_warn(hdev, "PPAG: ACPI Failure: %s", acpi_format_exception(status));
+ }
return;
}
if (ppag.domain != 0x12) {
- bt_dev_warn(hdev, "PPAG-BT Domain disabled");
+ bt_dev_warn(hdev, "PPAG-BT: domain is not bluetooth");
return;
}
/* PPAG mode, BIT0 = 0 Disabled, BIT0 = 1 Enabled */
if (!(ppag.mode & BIT(0))) {
- bt_dev_dbg(hdev, "PPAG disabled");
+ bt_dev_dbg(hdev, "PPAG-BT: disabled");
return;
}
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 8e7da877efae..8fdb65b66315 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -137,13 +137,6 @@ struct intel_offload_use_cases {
__u8 preset[8];
} __packed;
-/* structure to store the PPAG data read from ACPI table */
-struct btintel_ppag {
- u32 domain;
- u32 mode;
- struct hci_dev *hdev;
-};
-
struct btintel_loc_aware_reg {
__le32 mcc;
__le32 sel;
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index 2acb719e596f..11c7e04bf394 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -122,6 +122,21 @@ static int btqcomsmd_setup(struct hci_dev *hdev)
return 0;
}
+static int btqcomsmd_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ int ret;
+
+ ret = qca_set_bdaddr_rome(hdev, bdaddr);
+ if (ret)
+ return ret;
+
+ /* The firmware stops responding for a while after setting the bdaddr,
+ * causing timeouts for subsequent commands. Sleep a bit to avoid this.
+ */
+ usleep_range(1000, 10000);
+ return 0;
+}
+
static int btqcomsmd_probe(struct platform_device *pdev)
{
struct btqcomsmd *btq;
@@ -162,7 +177,7 @@ static int btqcomsmd_probe(struct platform_device *pdev)
hdev->close = btqcomsmd_close;
hdev->send = btqcomsmd_send;
hdev->setup = btqcomsmd_setup;
- hdev->set_bdaddr = qca_set_bdaddr_rome;
+ hdev->set_bdaddr = btqcomsmd_set_bdaddr;
ret = hci_register_dev(hdev);
if (ret < 0)
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 795be33f2892..02893600db39 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -354,6 +354,7 @@ static void btsdio_remove(struct sdio_func *func)
BT_DBG("func %p", func);
+ cancel_work_sync(&data->work);
if (!data)
return;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 18bc94718711..5c536151ef83 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -1050,21 +1050,11 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
hci_skb_expect(skb) -= len;
if (skb->len == HCI_ACL_HDR_SIZE) {
- __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
__le16 dlen = hci_acl_hdr(skb)->dlen;
- __u8 type;
/* Complete ACL header */
hci_skb_expect(skb) = __le16_to_cpu(dlen);
- /* Detect if ISO packet has been sent over bulk */
- if (hci_conn_num(data->hdev, ISO_LINK)) {
- type = hci_conn_lookup_type(data->hdev,
- hci_handle(handle));
- if (type == ISO_LINK)
- hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
- }
-
if (skb_tailroom(skb) < hci_skb_expect(skb)) {
kfree_skb(skb);
skb = NULL;
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 2a6b4f676458..36d42484142a 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -204,8 +204,8 @@ static int weim_parse_dt(struct platform_device *pdev)
const struct of_device_id *of_id = of_match_device(weim_id_table,
&pdev->dev);
const struct imx_weim_devtype *devtype = of_id->data;
+ int ret = 0, have_child = 0;
struct device_node *child;
- int ret, have_child = 0;
struct weim_priv *priv;
void __iomem *base;
u32 reg;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index ce3ccd172cc8..253f2ddb8913 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1311,7 +1311,7 @@ static void __cold try_to_generate_entropy(void)
/* Basic CPU round-robin, which avoids the current CPU. */
do {
cpu = cpumask_next(cpu, &timer_cpus);
- if (cpu == nr_cpumask_bits)
+ if (cpu >= nr_cpu_ids)
cpu = cpumask_first(&timer_cpus);
} while (cpu == smp_processor_id() && num_cpus > 1);
diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
index 40360e599bc3..bd757d836c5c 100644
--- a/drivers/char/tpm/eventlog/acpi.c
+++ b/drivers/char/tpm/eventlog/acpi.c
@@ -144,8 +144,12 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
ret = -EIO;
virt = acpi_os_map_iomem(start, len);
- if (!virt)
+ if (!virt) {
+ dev_warn(&chip->dev, "%s: Failed to map ACPI memory\n", __func__);
+ /* try EFI log next */
+ ret = -ENODEV;
goto err;
+ }
memcpy_fromio(log->bios_event_log, virt, len);
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index b99f55f2d4fd..0601e6e5e326 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -511,6 +511,63 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
return 0;
}
+/*
+ * Some AMD fTPM versions may cause stutter
+ * https://www.amd.com/en/support/kb/faq/pa-410
+ *
+ * Fixes are available in two series of fTPM firmware:
+ * 6.x.y.z series: 6.0.18.6 +
+ * 3.x.y.z series: 3.57.y.5 +
+ */
+static bool tpm_amd_is_rng_defective(struct tpm_chip *chip)
+{
+ u32 val1, val2;
+ u64 version;
+ int ret;
+
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+ return false;
+
+ ret = tpm_request_locality(chip);
+ if (ret)
+ return false;
+
+ ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val1, NULL);
+ if (ret)
+ goto release;
+ if (val1 != 0x414D4400U /* AMD */) {
+ ret = -ENODEV;
+ goto release;
+ }
+ ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_1, &val1, NULL);
+ if (ret)
+ goto release;
+ ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_2, &val2, NULL);
+
+release:
+ tpm_relinquish_locality(chip);
+
+ if (ret)
+ return false;
+
+ version = ((u64)val1 << 32) | val2;
+ if ((version >> 48) == 6) {
+ if (version >= 0x0006000000180006ULL)
+ return false;
+ } else if ((version >> 48) == 3) {
+ if (version >= 0x0003005700000005ULL)
+ return false;
+ } else {
+ return false;
+ }
+
+ dev_warn(&chip->dev,
+ "AMD fTPM version 0x%llx causes system stutter; hwrng disabled\n",
+ version);
+
+ return true;
+}
+
static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
@@ -520,7 +577,8 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
static int tpm_add_hwrng(struct tpm_chip *chip)
{
- if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM) || tpm_is_firmware_upgrade(chip))
+ if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM) || tpm_is_firmware_upgrade(chip) ||
+ tpm_amd_is_rng_defective(chip))
return 0;
snprintf(chip->hwrng_name, sizeof(chip->hwrng_name),
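
For reference, a small user-space sketch (with hypothetical property values) of the version packing used by tpm_amd_is_rng_defective() above: the two 32-bit firmware-version properties are concatenated, so the top 16 bits carry the major series that the (version >> 48) test extracts, and the whole 64-bit value can be compared against the fixed-firmware thresholds:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t val1 = 0x00060000;     /* TPM2_PT_FIRMWARE_VERSION_1 (example) */
            uint32_t val2 = 0x00180006;     /* TPM2_PT_FIRMWARE_VERSION_2 (example) */
            uint64_t version = ((uint64_t)val1 << 32) | val2;

            printf("version = 0x%016llx\n", (unsigned long long)version);
            printf("series  = %llu\n", (unsigned long long)(version >> 48));
            printf("fixed   = %s\n",
                   version >= 0x0006000000180006ULL ? "yes" : "no");
            return 0;
    }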
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 24ee4e1cc452..830014a26609 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -150,6 +150,79 @@ enum tpm_sub_capabilities {
TPM_CAP_PROP_TIS_DURATION = 0x120,
};
+enum tpm2_pt_props {
+ TPM2_PT_NONE = 0x00000000,
+ TPM2_PT_GROUP = 0x00000100,
+ TPM2_PT_FIXED = TPM2_PT_GROUP * 1,
+ TPM2_PT_FAMILY_INDICATOR = TPM2_PT_FIXED + 0,
+ TPM2_PT_LEVEL = TPM2_PT_FIXED + 1,
+ TPM2_PT_REVISION = TPM2_PT_FIXED + 2,
+ TPM2_PT_DAY_OF_YEAR = TPM2_PT_FIXED + 3,
+ TPM2_PT_YEAR = TPM2_PT_FIXED + 4,
+ TPM2_PT_MANUFACTURER = TPM2_PT_FIXED + 5,
+ TPM2_PT_VENDOR_STRING_1 = TPM2_PT_FIXED + 6,
+ TPM2_PT_VENDOR_STRING_2 = TPM2_PT_FIXED + 7,
+ TPM2_PT_VENDOR_STRING_3 = TPM2_PT_FIXED + 8,
+ TPM2_PT_VENDOR_STRING_4 = TPM2_PT_FIXED + 9,
+ TPM2_PT_VENDOR_TPM_TYPE = TPM2_PT_FIXED + 10,
+ TPM2_PT_FIRMWARE_VERSION_1 = TPM2_PT_FIXED + 11,
+ TPM2_PT_FIRMWARE_VERSION_2 = TPM2_PT_FIXED + 12,
+ TPM2_PT_INPUT_BUFFER = TPM2_PT_FIXED + 13,
+ TPM2_PT_HR_TRANSIENT_MIN = TPM2_PT_FIXED + 14,
+ TPM2_PT_HR_PERSISTENT_MIN = TPM2_PT_FIXED + 15,
+ TPM2_PT_HR_LOADED_MIN = TPM2_PT_FIXED + 16,
+ TPM2_PT_ACTIVE_SESSIONS_MAX = TPM2_PT_FIXED + 17,
+ TPM2_PT_PCR_COUNT = TPM2_PT_FIXED + 18,
+ TPM2_PT_PCR_SELECT_MIN = TPM2_PT_FIXED + 19,
+ TPM2_PT_CONTEXT_GAP_MAX = TPM2_PT_FIXED + 20,
+ TPM2_PT_NV_COUNTERS_MAX = TPM2_PT_FIXED + 22,
+ TPM2_PT_NV_INDEX_MAX = TPM2_PT_FIXED + 23,
+ TPM2_PT_MEMORY = TPM2_PT_FIXED + 24,
+ TPM2_PT_CLOCK_UPDATE = TPM2_PT_FIXED + 25,
+ TPM2_PT_CONTEXT_HASH = TPM2_PT_FIXED + 26,
+ TPM2_PT_CONTEXT_SYM = TPM2_PT_FIXED + 27,
+ TPM2_PT_CONTEXT_SYM_SIZE = TPM2_PT_FIXED + 28,
+ TPM2_PT_ORDERLY_COUNT = TPM2_PT_FIXED + 29,
+ TPM2_PT_MAX_COMMAND_SIZE = TPM2_PT_FIXED + 30,
+ TPM2_PT_MAX_RESPONSE_SIZE = TPM2_PT_FIXED + 31,
+ TPM2_PT_MAX_DIGEST = TPM2_PT_FIXED + 32,
+ TPM2_PT_MAX_OBJECT_CONTEXT = TPM2_PT_FIXED + 33,
+ TPM2_PT_MAX_SESSION_CONTEXT = TPM2_PT_FIXED + 34,
+ TPM2_PT_PS_FAMILY_INDICATOR = TPM2_PT_FIXED + 35,
+ TPM2_PT_PS_LEVEL = TPM2_PT_FIXED + 36,
+ TPM2_PT_PS_REVISION = TPM2_PT_FIXED + 37,
+ TPM2_PT_PS_DAY_OF_YEAR = TPM2_PT_FIXED + 38,
+ TPM2_PT_PS_YEAR = TPM2_PT_FIXED + 39,
+ TPM2_PT_SPLIT_MAX = TPM2_PT_FIXED + 40,
+ TPM2_PT_TOTAL_COMMANDS = TPM2_PT_FIXED + 41,
+ TPM2_PT_LIBRARY_COMMANDS = TPM2_PT_FIXED + 42,
+ TPM2_PT_VENDOR_COMMANDS = TPM2_PT_FIXED + 43,
+ TPM2_PT_NV_BUFFER_MAX = TPM2_PT_FIXED + 44,
+ TPM2_PT_MODES = TPM2_PT_FIXED + 45,
+ TPM2_PT_MAX_CAP_BUFFER = TPM2_PT_FIXED + 46,
+ TPM2_PT_VAR = TPM2_PT_GROUP * 2,
+ TPM2_PT_PERMANENT = TPM2_PT_VAR + 0,
+ TPM2_PT_STARTUP_CLEAR = TPM2_PT_VAR + 1,
+ TPM2_PT_HR_NV_INDEX = TPM2_PT_VAR + 2,
+ TPM2_PT_HR_LOADED = TPM2_PT_VAR + 3,
+ TPM2_PT_HR_LOADED_AVAIL = TPM2_PT_VAR + 4,
+ TPM2_PT_HR_ACTIVE = TPM2_PT_VAR + 5,
+ TPM2_PT_HR_ACTIVE_AVAIL = TPM2_PT_VAR + 6,
+ TPM2_PT_HR_TRANSIENT_AVAIL = TPM2_PT_VAR + 7,
+ TPM2_PT_HR_PERSISTENT = TPM2_PT_VAR + 8,
+ TPM2_PT_HR_PERSISTENT_AVAIL = TPM2_PT_VAR + 9,
+ TPM2_PT_NV_COUNTERS = TPM2_PT_VAR + 10,
+ TPM2_PT_NV_COUNTERS_AVAIL = TPM2_PT_VAR + 11,
+ TPM2_PT_ALGORITHM_SET = TPM2_PT_VAR + 12,
+ TPM2_PT_LOADED_CURVES = TPM2_PT_VAR + 13,
+ TPM2_PT_LOCKOUT_COUNTER = TPM2_PT_VAR + 14,
+ TPM2_PT_MAX_AUTH_FAIL = TPM2_PT_VAR + 15,
+ TPM2_PT_LOCKOUT_INTERVAL = TPM2_PT_VAR + 16,
+ TPM2_PT_LOCKOUT_RECOVERY = TPM2_PT_VAR + 17,
+ TPM2_PT_NV_WRITE_RECOVERY = TPM2_PT_VAR + 18,
+ TPM2_PT_AUDIT_COUNTER_0 = TPM2_PT_VAR + 19,
+ TPM2_PT_AUDIT_COUNTER_1 = TPM2_PT_VAR + 20,
+};
/* 128 bytes is an arbitrary cap. This could be as large as TPM_BUFSIZE - 18
* bytes, but 128 is still a relatively large number of random bytes and
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index b6c5bf69a2b2..1eef05bb1f99 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -91,7 +91,7 @@ config COMMON_CLK_RK808
config COMMON_CLK_HI655X
tristate "Clock driver for Hi655x" if EXPERT
depends on (MFD_HI655X_PMIC || COMPILE_TEST)
- depends on REGMAP
+ select REGMAP
default MFD_HI655X_PMIC
help
This driver supports the hi655x PMIC clock. This
diff --git a/drivers/clk/bcm/clk-bcm2835-aux.c b/drivers/clk/bcm/clk-bcm2835-aux.c
index 290a2846a86b..0fafa5cba442 100644
--- a/drivers/clk/bcm/clk-bcm2835-aux.c
+++ b/drivers/clk/bcm/clk-bcm2835-aux.c
@@ -69,4 +69,3 @@ builtin_platform_driver(bcm2835_aux_clk_driver);
MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
MODULE_DESCRIPTION("BCM2835 auxiliary peripheral clock driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index e74fe6219d14..8dc476ef5bf9 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -2350,4 +2350,3 @@ builtin_platform_driver(bcm2835_clk_driver);
MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
MODULE_DESCRIPTION("BCM2835 clock driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk-fixed-mmio.c b/drivers/clk/clk-fixed-mmio.c
index 5225d17d6b3f..8609fca29cc4 100644
--- a/drivers/clk/clk-fixed-mmio.c
+++ b/drivers/clk/clk-fixed-mmio.c
@@ -99,4 +99,3 @@ module_platform_driver(of_fixed_mmio_clk_driver);
MODULE_AUTHOR("Jan Kotas <jank@cadence.com>");
MODULE_DESCRIPTION("Memory Mapped IO Fixed clock driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/clk-fsl-sai.c b/drivers/clk/clk-fsl-sai.c
index 6238fcea0467..ee5baf993ff2 100644
--- a/drivers/clk/clk-fsl-sai.c
+++ b/drivers/clk/clk-fsl-sai.c
@@ -88,5 +88,4 @@ module_platform_driver(fsl_sai_clk_driver);
MODULE_DESCRIPTION("Freescale SAI bitclock-as-a-clock driver");
MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
-MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:fsl-sai-clk");
diff --git a/drivers/clk/clk-k210.c b/drivers/clk/clk-k210.c
index 67a7cb3503c3..4eed667eddaf 100644
--- a/drivers/clk/clk-k210.c
+++ b/drivers/clk/clk-k210.c
@@ -495,7 +495,7 @@ static unsigned long k210_pll_get_rate(struct clk_hw *hw,
f = FIELD_GET(K210_PLL_CLKF, reg) + 1;
od = FIELD_GET(K210_PLL_CLKOD, reg) + 1;
- return (u64)parent_rate * f / (r * od);
+ return div_u64((u64)parent_rate * f, r * od);
}
static const struct clk_ops k210_pll_ops = {
diff --git a/drivers/clk/hisilicon/clk-hi3559a.c b/drivers/clk/hisilicon/clk-hi3559a.c
index 9ea1a80acbe8..8036bd8cbb0a 100644
--- a/drivers/clk/hisilicon/clk-hi3559a.c
+++ b/drivers/clk/hisilicon/clk-hi3559a.c
@@ -841,5 +841,4 @@ static void __exit hi3559av100_crg_exit(void)
module_exit(hi3559av100_crg_exit);
-MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("HiSilicon Hi3559AV100 CRG Driver");
diff --git a/drivers/clk/microchip/clk-mpfs-ccc.c b/drivers/clk/microchip/clk-mpfs-ccc.c
index 0ddc73e07be4..bce61c45e967 100644
--- a/drivers/clk/microchip/clk-mpfs-ccc.c
+++ b/drivers/clk/microchip/clk-mpfs-ccc.c
@@ -291,4 +291,3 @@ module_exit(clk_ccc_exit);
MODULE_DESCRIPTION("Microchip PolarFire SoC Clock Conditioning Circuitry Driver");
MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index 6ad2954948a5..11316c3b14ca 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -106,7 +106,8 @@ static void psci_pd_remove(void)
struct psci_pd_provider *pd_provider, *it;
struct generic_pm_domain *genpd;
- list_for_each_entry_safe(pd_provider, it, &psci_pd_providers, link) {
+ list_for_each_entry_safe_reverse(pd_provider, it,
+ &psci_pd_providers, link) {
of_genpd_del_provider(pd_provider->node);
genpd = of_genpd_remove_last(pd_provider->node);
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index 5c8a7084577b..9b3ce8948351 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -123,12 +123,23 @@ static void dma_fence_array_release(struct dma_fence *fence)
dma_fence_free(fence);
}
+static void dma_fence_array_set_deadline(struct dma_fence *fence,
+ ktime_t deadline)
+{
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+ unsigned i;
+
+ for (i = 0; i < array->num_fences; ++i)
+ dma_fence_set_deadline(array->fences[i], deadline);
+}
+
const struct dma_fence_ops dma_fence_array_ops = {
.get_driver_name = dma_fence_array_get_driver_name,
.get_timeline_name = dma_fence_array_get_timeline_name,
.enable_signaling = dma_fence_array_enable_signaling,
.signaled = dma_fence_array_signaled,
.release = dma_fence_array_release,
+ .set_deadline = dma_fence_array_set_deadline,
};
EXPORT_SYMBOL(dma_fence_array_ops);
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
index a0d920576ba6..9663ba1bb6ac 100644
--- a/drivers/dma-buf/dma-fence-chain.c
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -206,6 +206,17 @@ static void dma_fence_chain_release(struct dma_fence *fence)
dma_fence_free(fence);
}
+
+static void dma_fence_chain_set_deadline(struct dma_fence *fence,
+ ktime_t deadline)
+{
+ dma_fence_chain_for_each(fence, fence) {
+ struct dma_fence *f = dma_fence_chain_contained(fence);
+
+ dma_fence_set_deadline(f, deadline);
+ }
+}
+
const struct dma_fence_ops dma_fence_chain_ops = {
.use_64bit_seqno = true,
.get_driver_name = dma_fence_chain_get_driver_name,
@@ -213,6 +224,7 @@ const struct dma_fence_ops dma_fence_chain_ops = {
.enable_signaling = dma_fence_chain_enable_signaling,
.signaled = dma_fence_chain_signaled,
.release = dma_fence_chain_release,
+ .set_deadline = dma_fence_chain_set_deadline,
};
EXPORT_SYMBOL(dma_fence_chain_ops);
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 0de0482cd36e..f177c56269bb 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -913,6 +913,65 @@ err_free_cb:
EXPORT_SYMBOL(dma_fence_wait_any_timeout);
/**
+ * DOC: deadline hints
+ *
+ * In an ideal world, it would be possible to pipeline a workload sufficiently
+ * that a utilization based device frequency governor could arrive at a minimum
+ * frequency that meets the requirements of the use-case, in order to minimize
+ * power consumption. But in the real world there are many workloads which
+ * defy this ideal. For example, but not limited to:
+ *
+ * * Workloads that ping-pong between device and CPU, with alternating periods
+ * of CPU waiting for device, and device waiting on CPU. This can result in
+ * devfreq and cpufreq seeing idle time in their respective domains and as a
+ * result reduce frequency.
+ *
+ * * Workloads that interact with a periodic time based deadline, such as double
+ * buffered GPU rendering vs vblank sync'd page flipping. In this scenario,
+ * missing a vblank deadline results in an *increase* in idle time on the GPU
+ * (since it has to wait an additional vblank period), sending a signal to
+ * the GPU's devfreq to reduce frequency, when in fact the opposite is what is
+ * needed.
+ *
+ * To this end, deadline hint(s) can be set on a &dma_fence via &dma_fence_set_deadline.
+ * The deadline hint provides a way for the waiting driver, or userspace, to
+ * convey an appropriate sense of urgency to the signaling driver.
+ *
+ * A deadline hint is given in absolute ktime (CLOCK_MONOTONIC for userspace
+ * facing APIs). The time could either be some point in the future (such as
+ * the vblank based deadline for page-flipping, or the start of a compositor's
+ * composition cycle), or the current time to indicate an immediate deadline
+ * hint (i.e. forward progress cannot be made until this fence is signaled).
+ *
+ * Multiple deadlines may be set on a given fence, even in parallel. See the
+ * documentation for &dma_fence_ops.set_deadline.
+ *
+ * The deadline hint is just that, a hint. The driver that created the fence
+ * may react by increasing frequency, making different scheduling choices, etc.
+ * Or doing nothing at all.
+ */
+
+/**
+ * dma_fence_set_deadline - set desired fence-wait deadline hint
+ * @fence: the fence that is to be waited on
+ * @deadline: the time by which the waiter hopes for the fence to be
+ * signaled
+ *
+ * Give the fence signaler a hint about an upcoming deadline, such as
+ * vblank, by which point the waiter would prefer the fence to be
+ * signaled. This is intended to give feedback to the fence signaler
+ * to aid in power management decisions, such as boosting GPU frequency
+ * if a periodic vblank deadline is approaching but the fence is not
+ * yet signaled.
+ */
+void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
+{
+ if (fence->ops->set_deadline && !dma_fence_is_signaled(fence))
+ fence->ops->set_deadline(fence, deadline);
+}
+EXPORT_SYMBOL(dma_fence_set_deadline);
+
+/**
* dma_fence_describe - Dump fence description into seq_file
* @fence: the fence to describe
* @seq: the seq_file to put the textual description into
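
A minimal kernel-style sketch (hypothetical driver code, not part of this patch) of how a waiter might use the new deadline hint before blocking on a fence, e.g. to ask the signaler to try to finish before an upcoming vblank:

    /* Hypothetical helper in a display driver. */
    static long wait_for_flip(struct dma_fence *fence, ktime_t vblank_time)
    {
            dma_fence_set_deadline(fence, vblank_time);
            return dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
    }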
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 1c76aed8e262..2a594b754af1 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -684,6 +684,28 @@ long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
+/**
+ * dma_resv_set_deadline - Set a deadline on reservation's objects fences
+ * @obj: the reservation object
+ * @usage: controls which fences to include, see enum dma_resv_usage.
+ * @deadline: the requested deadline (MONOTONIC)
+ *
+ * May be called without holding the dma_resv lock. Sets @deadline on
+ * all fences filtered by @usage.
+ */
+void dma_resv_set_deadline(struct dma_resv *obj, enum dma_resv_usage usage,
+ ktime_t deadline)
+{
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+
+ dma_resv_iter_begin(&cursor, obj, usage);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ dma_fence_set_deadline(fence, deadline);
+ }
+ dma_resv_iter_end(&cursor);
+}
+EXPORT_SYMBOL_GPL(dma_resv_set_deadline);
/**
* dma_resv_test_signaled - Test if a reservation object's fences have been
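
And a similarly hedged sketch for the reservation-object variant added above, propagating one deadline to every fence the object tracks (hypothetical caller; the usage filter is chosen only for illustration):

    /* Hypothetical: boost everything an atomic commit still waits on. */
    static void boost_commit_deps(struct dma_resv *resv, ktime_t deadline)
    {
            dma_resv_set_deadline(resv, DMA_RESV_USAGE_READ, deadline);
    }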
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 73140b854b31..c15928b8c5cc 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
-#include <linux/of.h>
#include "common.h"
@@ -436,7 +435,7 @@ struct scmi_device *scmi_device_create(struct device_node *np,
/* Nothing to do. */
if (!phead) {
mutex_unlock(&scmi_requested_devices_mtx);
- return scmi_dev;
+ return NULL;
}
/* Walk the list of requested devices for protocol and create them */
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index d21c7eafd641..dbc474ff62b7 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -2221,8 +2221,8 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo,
hash_init(info->pending_xfers);
/* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
- info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(MSG_TOKEN_MAX),
- sizeof(long), GFP_KERNEL);
+ info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX,
+ GFP_KERNEL);
if (!info->xfer_alloc_table)
return -ENOMEM;
@@ -2657,6 +2657,7 @@ static int scmi_probe(struct platform_device *pdev)
struct scmi_handle *handle;
const struct scmi_desc *desc;
struct scmi_info *info;
+ bool coex = IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
struct device *dev = &pdev->dev;
struct device_node *child, *np = dev->of_node;
@@ -2731,16 +2732,13 @@ static int scmi_probe(struct platform_device *pdev)
dev_warn(dev, "Failed to setup SCMI debugfs.\n");
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
- bool coex =
- IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
-
ret = scmi_debugfs_raw_mode_setup(info);
if (!coex) {
if (ret)
goto clear_dev_req_notifier;
- /* Bail out anyway when coex enabled */
- return ret;
+ /* Bail out anyway when coex disabled. */
+ return 0;
}
/* Coex enabled, carry on in any case. */
@@ -2764,6 +2762,8 @@ static int scmi_probe(struct platform_device *pdev)
ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
if (ret) {
dev_err(dev, "unable to communicate with SCMI\n");
+ if (coex)
+ return 0;
goto notification_exit;
}
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
index 0d9c9538b7f4..112c285deb97 100644
--- a/drivers/firmware/arm_scmi/mailbox.c
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -52,6 +52,39 @@ static bool mailbox_chan_available(struct device_node *of_node, int idx)
"#mbox-cells", idx, NULL);
}
+static int mailbox_chan_validate(struct device *cdev)
+{
+ int num_mb, num_sh, ret = 0;
+ struct device_node *np = cdev->of_node;
+
+ num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
+ num_sh = of_count_phandle_with_args(np, "shmem", NULL);
+ /* Bail out if mboxes and shmem descriptors are inconsistent */
+ if (num_mb <= 0 || num_sh > 2 || num_mb != num_sh) {
+ dev_warn(cdev, "Invalid channel descriptor for '%s'\n",
+ of_node_full_name(np));
+ return -EINVAL;
+ }
+
+ if (num_sh > 1) {
+ struct device_node *np_tx, *np_rx;
+
+ np_tx = of_parse_phandle(np, "shmem", 0);
+ np_rx = of_parse_phandle(np, "shmem", 1);
+ /* SCMI Tx and Rx shared mem areas have to be distinct */
+ if (!np_tx || !np_rx || np_tx == np_rx) {
+ dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
+ of_node_full_name(np));
+ ret = -EINVAL;
+ }
+
+ of_node_put(np_tx);
+ of_node_put(np_rx);
+ }
+
+ return ret;
+}
+
static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
bool tx)
{
@@ -64,6 +97,10 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
resource_size_t size;
struct resource res;
+ ret = mailbox_chan_validate(cdev);
+ if (ret)
+ return ret;
+
smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL);
if (!smbox)
return -ENOMEM;
diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c
index f54e6fdf08e2..f80a9af3d16e 100644
--- a/drivers/firmware/efi/earlycon.c
+++ b/drivers/firmware/efi/earlycon.c
@@ -215,6 +215,14 @@ efi_earlycon_write(struct console *con, const char *str, unsigned int num)
}
}
+static bool __initdata fb_probed;
+
+void __init efi_earlycon_reprobe(void)
+{
+ if (fb_probed)
+ setup_earlycon("efifb");
+}
+
static int __init efi_earlycon_setup(struct earlycon_device *device,
const char *opt)
{
@@ -222,15 +230,17 @@ static int __init efi_earlycon_setup(struct earlycon_device *device,
u16 xres, yres;
u32 i;
- if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+ fb_wb = opt && !strcmp(opt, "ram");
+
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) {
+ fb_probed = true;
return -ENODEV;
+ }
fb_base = screen_info.lfb_base;
if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
fb_base |= (u64)screen_info.ext_lfb_base << 32;
- fb_wb = opt && !strcmp(opt, "ram");
-
si = &screen_info;
xres = si->lfb_width;
yres = si->lfb_height;
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index 2c16080e1f71..ef0820f1a924 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -72,6 +72,9 @@ static void __init init_screen_info(void)
if (memblock_is_map_memory(screen_info.lfb_base))
memblock_mark_nomap(screen_info.lfb_base,
screen_info.lfb_size);
+
+ if (IS_ENABLED(CONFIG_EFI_EARLYCON))
+ efi_earlycon_reprobe();
}
}
diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot
index 43e9a4cab9f5..ccdd6a130d98 100644
--- a/drivers/firmware/efi/libstub/Makefile.zboot
+++ b/drivers/firmware/efi/libstub/Makefile.zboot
@@ -44,4 +44,4 @@ OBJCOPYFLAGS_vmlinuz.efi := -O binary
$(obj)/vmlinuz.efi: $(obj)/vmlinuz.efi.elf FORCE
$(call if_changed,objcopy)
-targets += zboot-header.o vmlinuz.o vmlinuz.efi.elf vmlinuz.efi
+targets += zboot-header.o vmlinuz vmlinuz.o vmlinuz.efi.elf vmlinuz.efi
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index d4a6b12a8741..770b8ecb7398 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -85,8 +85,10 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
}
}
- if (image->image_base != _text)
+ if (image->image_base != _text) {
efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
+ image->image_base = _text;
+ }
if (!IS_ALIGNED((u64)_text, SEGMENT_ALIGN))
efi_err("FIRMWARE BUG: kernel image not aligned on %dk boundary\n",
@@ -139,6 +141,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
*image_addr = *reserve_addr;
memcpy((void *)*image_addr, _text, kernel_size);
caches_clean_inval_pou(*image_addr, *image_addr + kernel_codesize);
+ efi_remap_image(*image_addr, *reserve_size, kernel_codesize);
return EFI_SUCCESS;
}
diff --git a/drivers/firmware/efi/libstub/arm64.c b/drivers/firmware/efi/libstub/arm64.c
index 399770266372..8aad8c49d43f 100644
--- a/drivers/firmware/efi/libstub/arm64.c
+++ b/drivers/firmware/efi/libstub/arm64.c
@@ -16,20 +16,43 @@
static bool system_needs_vamap(void)
{
- const u8 *type1_family = efi_get_smbios_string(1, family);
+ const struct efi_smbios_type4_record *record;
+ const u32 __aligned(1) *socid;
+ const u8 *version;
/*
* Ampere eMAG, Altra, and Altra Max machines crash in SetTime() if
- * SetVirtualAddressMap() has not been called prior.
+ * SetVirtualAddressMap() has not been called prior. Most Altra systems
+ * can be identified by the SMCCC soc ID, which is conveniently exposed
+ * via the type 4 SMBIOS records. Otherwise, test the processor version
+ * field. eMAG systems all appear to have the processor version field
+ * set to "eMAG".
*/
- if (!type1_family || (
- strcmp(type1_family, "eMAG") &&
- strcmp(type1_family, "Altra") &&
- strcmp(type1_family, "Altra Max")))
+ record = (struct efi_smbios_type4_record *)efi_get_smbios_record(4);
+ if (!record)
return false;
- efi_warn("Working around broken SetVirtualAddressMap()\n");
- return true;
+ socid = (u32 *)record->processor_id;
+ switch (*socid & 0xffff000f) {
+ static char const altra[] = "Ampere(TM) Altra(TM) Processor";
+ static char const emag[] = "eMAG";
+
+ default:
+ version = efi_get_smbios_string(&record->header, 4,
+ processor_version);
+ if (!version || (strncmp(version, altra, sizeof(altra) - 1) &&
+ strncmp(version, emag, sizeof(emag) - 1)))
+ break;
+
+ fallthrough;
+
+ case 0x0a160001: // Altra
+ case 0x0a160002: // Altra Max
+ efi_warn("Working around broken SetVirtualAddressMap()\n");
+ return true;
+ }
+
+ return false;
}
efi_status_t check_platform_features(void)
diff --git a/drivers/firmware/efi/libstub/efi-stub-entry.c b/drivers/firmware/efi/libstub/efi-stub-entry.c
index 5245c4f031c0..cc4dcaea67fa 100644
--- a/drivers/firmware/efi/libstub/efi-stub-entry.c
+++ b/drivers/firmware/efi/libstub/efi-stub-entry.c
@@ -5,6 +5,15 @@
#include "efistub.h"
+static unsigned long screen_info_offset;
+
+struct screen_info *alloc_screen_info(void)
+{
+ if (IS_ENABLED(CONFIG_ARM))
+ return __alloc_screen_info();
+ return (void *)&screen_info + screen_info_offset;
+}
+
/*
* EFI entry point for the generic EFI stub used by ARM, arm64, RISC-V and
* LoongArch. This is the entrypoint that is described in the PE/COFF header
@@ -56,6 +65,8 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
return status;
}
+ screen_info_offset = image_addr - (unsigned long)image->image_base;
+
status = efi_stub_common(handle, image, image_addr, cmdline_ptr);
efi_free(image_size, image_addr);
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index 2955c1ac6a36..f9c1e8a2bd1d 100644
--- a/drivers/firmware/efi/libstub/efi-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -47,11 +47,6 @@
static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
static bool flat_va_mapping = (EFI_RT_VIRTUAL_OFFSET != 0);
-struct screen_info * __weak alloc_screen_info(void)
-{
- return &screen_info;
-}
-
void __weak free_screen_info(struct screen_info *si)
{
}
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 6bd3bb86d967..148013bcb5f8 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -1062,6 +1062,7 @@ efi_enable_reset_attack_mitigation(void) { }
void efi_retrieve_tpm2_eventlog(void);
struct screen_info *alloc_screen_info(void);
+struct screen_info *__alloc_screen_info(void);
void free_screen_info(struct screen_info *si);
void efi_cache_sync_image(unsigned long image_base,
@@ -1074,6 +1075,8 @@ struct efi_smbios_record {
u16 handle;
};
+const struct efi_smbios_record *efi_get_smbios_record(u8 type);
+
struct efi_smbios_type1_record {
struct efi_smbios_record header;
@@ -1087,14 +1090,46 @@ struct efi_smbios_type1_record {
u8 family;
};
-#define efi_get_smbios_string(__type, __name) ({ \
- int size = sizeof(struct efi_smbios_type ## __type ## _record); \
+struct efi_smbios_type4_record {
+ struct efi_smbios_record header;
+
+ u8 socket;
+ u8 processor_type;
+ u8 processor_family;
+ u8 processor_manufacturer;
+ u8 processor_id[8];
+ u8 processor_version;
+ u8 voltage;
+ u16 external_clock;
+ u16 max_speed;
+ u16 current_speed;
+ u8 status;
+ u8 processor_upgrade;
+ u16 l1_cache_handle;
+ u16 l2_cache_handle;
+ u16 l3_cache_handle;
+ u8 serial_number;
+ u8 asset_tag;
+ u8 part_number;
+ u8 core_count;
+ u8 enabled_core_count;
+ u8 thread_count;
+ u16 processor_characteristics;
+ u16 processor_family2;
+ u16 core_count2;
+ u16 enabled_core_count2;
+ u16 thread_count2;
+ u16 thread_enabled;
+};
+
+#define efi_get_smbios_string(__record, __type, __name) ({ \
int off = offsetof(struct efi_smbios_type ## __type ## _record, \
__name); \
- __efi_get_smbios_string(__type, off, size); \
+ __efi_get_smbios_string((__record), __type, off); \
})
-const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize);
+const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
+ u8 type, int offset);
void efi_remap_image(unsigned long image_base, unsigned alloc_size,
unsigned long code_size);
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index 1692d19ae80f..32c7a54923b4 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -101,6 +101,7 @@ efi_status_t efi_random_alloc(unsigned long size,
* to calculate the randomly chosen address, and allocate it directly
* using EFI_ALLOCATE_ADDRESS.
*/
+ status = EFI_OUT_OF_RESOURCES;
for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
efi_memory_desc_t *md = (void *)map->map + map_offset;
efi_physical_addr_t target;
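The single added line above pre-seeds status with EFI_OUT_OF_RESOURCES before the search loop, so that falling out of the loop without finding a suitable memory descriptor reports a real failure instead of whatever value the earlier call left in status. The general shape of the pattern, as a stand-alone sketch with hypothetical names:

    #include <stdbool.h>
    #include <stddef.h>

    enum status { STATUS_SUCCESS = 0, STATUS_OUT_OF_RESOURCES = 1 };

    static enum status pick_first_match(const bool *usable, size_t n, size_t *out)
    {
    	enum status status = STATUS_OUT_OF_RESOURCES;	/* assume failure up front */

    	for (size_t i = 0; i < n; i++) {
    		if (usable[i]) {
    			*out = i;
    			status = STATUS_SUCCESS;
    			break;
    		}
    	}
    	return status;	/* still OUT_OF_RESOURCES if nothing matched */
    }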
diff --git a/drivers/firmware/efi/libstub/screen_info.c b/drivers/firmware/efi/libstub/screen_info.c
index 8e76a8b384ba..4be1c4d1f922 100644
--- a/drivers/firmware/efi/libstub/screen_info.c
+++ b/drivers/firmware/efi/libstub/screen_info.c
@@ -15,18 +15,11 @@
* early, but it only works if the EFI stub is part of the core kernel image
* itself. The zboot decompressor can only use the configuration table
* approach.
- *
- * In order to support both methods from the same build of the EFI stub
- * library, provide this dummy global definition of struct screen_info. If it
- * is required to satisfy a link dependency, it means we need to override the
- * __weak alloc and free methods with the ones below, and those will be pulled
- * in as well.
*/
-struct screen_info screen_info;
static efi_guid_t screen_info_guid = LINUX_EFI_SCREEN_INFO_TABLE_GUID;
-struct screen_info *alloc_screen_info(void)
+struct screen_info *__alloc_screen_info(void)
{
struct screen_info *si;
efi_status_t status;
diff --git a/drivers/firmware/efi/libstub/smbios.c b/drivers/firmware/efi/libstub/smbios.c
index 460418b7f5f5..c217de2cc8d5 100644
--- a/drivers/firmware/efi/libstub/smbios.c
+++ b/drivers/firmware/efi/libstub/smbios.c
@@ -22,21 +22,30 @@ struct efi_smbios_protocol {
u8 minor_version;
};
-const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize)
+const struct efi_smbios_record *efi_get_smbios_record(u8 type)
{
struct efi_smbios_record *record;
efi_smbios_protocol_t *smbios;
efi_status_t status;
u16 handle = 0xfffe;
- const u8 *strtable;
status = efi_bs_call(locate_protocol, &EFI_SMBIOS_PROTOCOL_GUID, NULL,
(void **)&smbios) ?:
efi_call_proto(smbios, get_next, &handle, &type, &record, NULL);
if (status != EFI_SUCCESS)
return NULL;
+ return record;
+}
+
+const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
+ u8 type, int offset)
+{
+ const u8 *strtable;
+
+ if (!record)
+ return NULL;
- strtable = (u8 *)record + recsize;
+ strtable = (u8 *)record + record->length;
for (int i = 1; i < ((u8 *)record)[offset]; i++) {
int len = strlen(strtable);
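For context on the string walk above: an SMBIOS record is a formatted area of record->length bytes followed by a packed set of NUL-terminated strings, and single-byte fields such as processor_version hold a 1-based index into that string set (0 meaning no string). A free-standing sketch of such a lookup, assuming that layout rather than reproducing the stub's exact code:

    #include <stddef.h>
    #include <string.h>

    struct smbios_record {            /* minimal header; real records carry more fields */
    	unsigned char type;
    	unsigned char length;         /* size of the formatted area */
    	unsigned short handle;
    } __attribute__((packed));

    /* Return string number 'index' (1-based) from the record's string set,
     * or NULL when the index is 0 or runs past the terminating empty string. */
    static const char *smbios_string(const struct smbios_record *rec,
    				 unsigned char index)
    {
    	const char *s = (const char *)rec + rec->length;

    	if (!index)
    		return NULL;
    	while (--index) {
    		size_t len = strlen(s);

    		if (!len)
    			return NULL;
    		s += len + 1;
    	}
    	return *s ? s : NULL;
    }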
diff --git a/drivers/firmware/efi/libstub/zboot-header.S b/drivers/firmware/efi/libstub/zboot-header.S
index ec4525d40e0c..445cb646eaaa 100644
--- a/drivers/firmware/efi/libstub/zboot-header.S
+++ b/drivers/firmware/efi/libstub/zboot-header.S
@@ -63,7 +63,7 @@ __efistub_efi_zboot_header:
.long .Lefi_header_end - .Ldoshdr
.long 0
.short IMAGE_SUBSYSTEM_EFI_APPLICATION
- .short 0
+ .short IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
#ifdef CONFIG_64BIT
.quad 0, 0, 0, 0
#else
diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c
index ba234e062a1a..6105e5e2eda4 100644
--- a/drivers/firmware/efi/libstub/zboot.c
+++ b/drivers/firmware/efi/libstub/zboot.c
@@ -57,6 +57,11 @@ void __weak efi_cache_sync_image(unsigned long image_base,
// executable code loaded into memory to be safe for execution.
}
+struct screen_info *alloc_screen_info(void)
+{
+ return __alloc_screen_info();
+}
+
asmlinkage efi_status_t __efiapi
efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
{
diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c
index f06fdacc9bc8..456d0e5eaf78 100644
--- a/drivers/firmware/efi/sysfb_efi.c
+++ b/drivers/firmware/efi/sysfb_efi.c
@@ -272,6 +272,14 @@ static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
"IdeaPad Duet 3 10IGL5"),
},
},
+ {
+ /* Lenovo Yoga Book X91F / X91L */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ /* Non exact match to match F + L versions */
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
+ },
+ },
{},
};
@@ -341,7 +349,7 @@ static const struct fwnode_operations efifb_fwnode_ops = {
#ifdef CONFIG_EFI
static struct fwnode_handle efifb_fwnode;
-__init void sysfb_apply_efi_quirks(struct platform_device *pd)
+__init void sysfb_apply_efi_quirks(void)
{
if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
!(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
@@ -355,7 +363,10 @@ __init void sysfb_apply_efi_quirks(struct platform_device *pd)
screen_info.lfb_height = temp;
screen_info.lfb_linelength = 4 * screen_info.lfb_width;
}
+}
+__init void sysfb_set_efifb_fwnode(struct platform_device *pd)
+{
if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI && IS_ENABLED(CONFIG_PCI)) {
fwnode_init(&efifb_fwnode, &efifb_fwnode_ops);
pd->dev.fwnode = &efifb_fwnode;
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 468d4d5ab550..b1e11f85b805 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -1479,7 +1479,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
init_completion(&__scm->waitq_comp);
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq < 0) {
if (irq != -ENXIO)
return irq;
diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
index 3fd3563d962b..3c197db42c9d 100644
--- a/drivers/firmware/sysfb.c
+++ b/drivers/firmware/sysfb.c
@@ -81,6 +81,8 @@ static __init int sysfb_init(void)
if (disabled)
goto unlock_mutex;
+ sysfb_apply_efi_quirks();
+
/* try to create a simple-framebuffer device */
compatible = sysfb_parse_mode(si, &mode);
if (compatible) {
@@ -107,7 +109,7 @@ static __init int sysfb_init(void)
goto unlock_mutex;
}
- sysfb_apply_efi_quirks(pd);
+ sysfb_set_efifb_fwnode(pd);
ret = platform_device_add_data(pd, si, sizeof(*si));
if (ret)
diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
index ce9c007ed66f..82c64cb9f531 100644
--- a/drivers/firmware/sysfb_simplefb.c
+++ b/drivers/firmware/sysfb_simplefb.c
@@ -141,7 +141,7 @@ __init struct platform_device *sysfb_create_simplefb(const struct screen_info *s
if (!pd)
return ERR_PTR(-ENOMEM);
- sysfb_apply_efi_quirks(pd);
+ sysfb_set_efifb_fwnode(pd);
ret = platform_device_add_resources(pd, &res, 1);
if (ret)
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index acd83d29c866..ce86a1850305 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -206,7 +206,7 @@ static int do_feature_check_call(const u32 api_id)
}
/* Add new entry if not present */
- feature_data = kmalloc(sizeof(*feature_data), GFP_KERNEL);
+ feature_data = kmalloc(sizeof(*feature_data), GFP_ATOMIC);
if (!feature_data)
return -ENOMEM;
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index d8a421ce26a8..31ae0adbb295 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -536,6 +536,9 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
if (ACPI_FAILURE(status))
return;
+ if (acpi_quirk_skip_gpio_event_handlers())
+ return;
+
acpi_walk_resources(handle, METHOD_NAME__AEI,
acpi_gpiochip_alloc_event, acpi_gpio);
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 5d1e28218020..12adca8c7819 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -3,6 +3,7 @@
config DRM_AMDGPU
tristate "AMD GPU"
depends on DRM && PCI && MMU
+ depends on !UML
select FW_LOADER
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDMI_HELPER
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 1d72cbc85348..5f9ac1bcb6bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -54,7 +54,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_preempt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o \
amdgpu_atomfirmware.o amdgpu_vf_error.o amdgpu_sched.o \
- amdgpu_debugfs.o amdgpu_ids.o amdgpu_gmc.o \
+ amdgpu_debugfs.o amdgpu_ids.o amdgpu_gmc.o amdgpu_mmhub.o amdgpu_hdp.o \
amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
@@ -148,6 +148,7 @@ amdgpu-y += \
sdma_v3_0.o \
sdma_v4_0.o \
sdma_v4_4.o \
+ sdma_v4_4_2.o \
sdma_v5_0.o \
sdma_v5_2.o \
sdma_v6_0.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index dda88090f044..8cf2cc50b3de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -50,7 +50,6 @@
#include <linux/hashtable.h>
#include <linux/dma-fence.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
@@ -1005,7 +1004,6 @@ struct amdgpu_device {
bool in_runpm;
bool has_pr3;
- bool pm_sysfs_en;
bool ucode_sysfs_en;
bool psp_sysfs_en;
@@ -1095,18 +1093,14 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr);
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr);
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr, u32 reg_data);
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr, u64 reg_data);
-
+u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
@@ -1254,6 +1248,7 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
+bool amdgpu_device_aspm_support_quirk(void);
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
u64 num_vis_bytes);
@@ -1373,10 +1368,12 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
void amdgpu_acpi_detect(void);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
+static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_acpi_detect(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
@@ -1387,11 +1384,9 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
-bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
-static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index d4196fcb85a0..60b1857f469e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -971,6 +971,29 @@ static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
return true;
}
+
+/**
+ * amdgpu_acpi_should_gpu_reset
+ *
+ * @adev: amdgpu_device pointer

+ *
+ * returns true if should reset GPU, false if not
+ */
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
+{
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+#if IS_ENABLED(CONFIG_SUSPEND)
+ return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
+#else
+ return true;
+#endif
+}
+
/*
* amdgpu_acpi_detect - detect ACPI ATIF/ATCS methods
*
@@ -1043,24 +1066,6 @@ bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
}
/**
- * amdgpu_acpi_should_gpu_reset
- *
- * @adev: amdgpu_device_pointer
- *
- * returns true if should reset GPU, false if not
- */
-bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
-{
- if (adev->flags & AMD_IS_APU)
- return false;
-
- if (amdgpu_sriov_vf(adev))
- return false;
-
- return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
-}
-
-/**
* amdgpu_acpi_is_s0ix_active
*
* @adev: amdgpu_device_pointer
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 333780491867..01ba3589b60a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -308,6 +308,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
uint64_t va, void *drm_priv,
struct kgd_mem **mem, uint64_t *size,
uint64_t *mmap_offset);
+int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
+ struct dma_buf **dmabuf);
int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
struct tile_config *config);
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index d6320c836251..c87515210c4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -711,6 +711,21 @@ kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
}
}
+static int kfd_mem_export_dmabuf(struct kgd_mem *mem)
+{
+ if (!mem->dmabuf) {
+ struct dma_buf *ret = amdgpu_gem_prime_export(
+ &mem->bo->tbo.base,
+ mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
+ DRM_RDWR : 0);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+ mem->dmabuf = ret;
+ }
+
+ return 0;
+}
+
static int
kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
struct amdgpu_bo **bo)
@@ -718,16 +733,9 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
struct drm_gem_object *gobj;
int ret;
- if (!mem->dmabuf) {
- mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
- mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
- DRM_RDWR : 0);
- if (IS_ERR(mem->dmabuf)) {
- ret = PTR_ERR(mem->dmabuf);
- mem->dmabuf = NULL;
- return ret;
- }
- }
+ ret = kfd_mem_export_dmabuf(mem);
+ if (ret)
+ return ret;
gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
if (IS_ERR(gobj))
@@ -1575,7 +1583,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
{
uint64_t reserved_for_pt =
ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
- size_t available;
+ ssize_t available;
spin_lock(&kfd_mem_limit.mem_limit_lock);
available = adev->gmc.real_vram_size
@@ -1584,6 +1592,9 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
- reserved_for_pt;
spin_unlock(&kfd_mem_limit.mem_limit_lock);
+ if (available < 0)
+ available = 0;
+
return ALIGN_DOWN(available, VRAM_AVAILABLITY_ALIGN);
}
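The type change above (size_t to ssize_t) matters because the available-memory calculation can legitimately go negative once pinned, reserved and page-table allocations exceed usable VRAM; with an unsigned type the subtraction would wrap to an enormous value instead of something that can be clamped to zero as the hunk now does. A small stand-alone illustration with made-up numbers:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint64_t total = 1024, used = 900, reserved = 200;

    	size_t  wrapped = total - used - reserved;            /* unsigned math wraps to a huge bogus value */
    	int64_t avail   = (int64_t)total - (int64_t)used - (int64_t)reserved;  /* -76 */

    	if (avail < 0)                                        /* clamp, as the driver now does */
    		avail = 0;

    	printf("unsigned: %zu  signed+clamped: %" PRId64 "\n", wrapped, avail);
    	return 0;
    }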
@@ -2210,30 +2221,27 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
struct amdgpu_bo *bo;
int ret;
- if (dma_buf->ops != &amdgpu_dmabuf_ops)
- /* Can't handle non-graphics buffers */
- return -EINVAL;
-
- obj = dma_buf->priv;
- if (drm_to_adev(obj->dev) != adev)
- /* Can't handle buffers from other devices */
- return -EINVAL;
+ obj = amdgpu_gem_prime_import(adev_to_drm(adev), dma_buf);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
bo = gem_to_amdgpu_bo(obj);
if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
- AMDGPU_GEM_DOMAIN_GTT)))
+ AMDGPU_GEM_DOMAIN_GTT))) {
/* Only VRAM and GTT BOs are supported */
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_obj;
+ }
*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
- if (!*mem)
- return -ENOMEM;
+ if (!*mem) {
+ ret = -ENOMEM;
+ goto err_put_obj;
+ }
ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
- if (ret) {
- kfree(*mem);
- return ret;
- }
+ if (ret)
+ goto err_free_mem;
if (size)
*size = amdgpu_bo_size(bo);
@@ -2250,7 +2258,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
- drm_gem_object_get(&bo->tbo.base);
+ get_dma_buf(dma_buf);
+ (*mem)->dmabuf = dma_buf;
(*mem)->bo = bo;
(*mem)->va = va;
(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
@@ -2262,6 +2271,29 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
(*mem)->is_imported = true;
return 0;
+
+err_free_mem:
+ kfree(*mem);
+err_put_obj:
+ drm_gem_object_put(obj);
+ return ret;
+}
+
+int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
+ struct dma_buf **dma_buf)
+{
+ int ret;
+
+ mutex_lock(&mem->lock);
+ ret = kfd_mem_export_dmabuf(mem);
+ if (ret)
+ goto out;
+
+ get_dma_buf(mem->dmabuf);
+ *dma_buf = mem->dmabuf;
+out:
+ mutex_unlock(&mem->lock);
+ return ret;
}
/* Evict a userptr BO by stopping the queues if necessary
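The reworked import path above takes its reference via amdgpu_gem_prime_import() first and then unwinds any partial state through goto labels on failure, instead of freeing ad hoc at each early return. A generic sketch of that error-unwinding idiom, with all names hypothetical:

    #include <stdlib.h>

    struct ctx { void *a; void *b; };

    static int setup(struct ctx *c)
    {
    	int ret = 0;

    	c->a = malloc(64);
    	if (!c->a)
    		return -1;

    	c->b = malloc(64);
    	if (!c->b) {
    		ret = -1;
    		goto err_free_a;	/* undo only what already succeeded */
    	}

    	return 0;

    err_free_a:
    	free(c->a);
    	c->a = NULL;
    	return ret;
    }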
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c4a4e2fe6681..f5658359ff5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -35,6 +35,7 @@
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>
#include <linux/pci-p2pdma.h>
+#include <linux/apple-gmux.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
@@ -80,6 +81,10 @@
#include <drm/drm_drv.h>
+#if IS_ENABLED(CONFIG_X86)
+#include <asm/intel-family.h>
+#endif
+
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -675,20 +680,20 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
* amdgpu_device_indirect_rreg - read an indirect register
*
* @adev: amdgpu_device pointer
- * @pcie_index: mmio register offset
- * @pcie_data: mmio register offset
* @reg_addr: indirect register address to read from
*
* Returns the value of indirect register @reg_addr
*/
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr)
{
- unsigned long flags;
- u32 r;
+ unsigned long flags, pcie_index, pcie_data;
void __iomem *pcie_index_offset;
void __iomem *pcie_data_offset;
+ u32 r;
+
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
@@ -706,20 +711,20 @@ u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
* amdgpu_device_indirect_rreg64 - read a 64bits indirect register
*
* @adev: amdgpu_device pointer
- * @pcie_index: mmio register offset
- * @pcie_data: mmio register offset
* @reg_addr: indirect register address to read from
*
* Returns the value of indirect register @reg_addr
*/
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr)
{
- unsigned long flags;
- u64 r;
+ unsigned long flags, pcie_index, pcie_data;
void __iomem *pcie_index_offset;
void __iomem *pcie_data_offset;
+ u64 r;
+
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
@@ -749,13 +754,15 @@ u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
*
*/
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr, u32 reg_data)
{
- unsigned long flags;
+ unsigned long flags, pcie_index, pcie_data;
void __iomem *pcie_index_offset;
void __iomem *pcie_data_offset;
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
@@ -778,13 +785,15 @@ void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
*
*/
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr, u64 reg_data)
{
- unsigned long flags;
+ unsigned long flags, pcie_index, pcie_data;
void __iomem *pcie_index_offset;
void __iomem *pcie_data_offset;
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
@@ -803,6 +812,18 @@ void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
}
/**
+ * amdgpu_device_get_rev_id - query device rev_id
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Return device rev_id
+ */
+u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
+{
+ return adev->nbio.funcs->get_rev_id(adev);
+}
+
+/**
* amdgpu_invalid_rreg - dummy reg read function
*
* @adev: amdgpu_device pointer
@@ -1356,6 +1377,17 @@ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
return pcie_aspm_enabled(adev->pdev);
}
+bool amdgpu_device_aspm_support_quirk(void)
+{
+#if IS_ENABLED(CONFIG_X86)
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
+#else
+ return true;
+#endif
+}
+
/* if we get transitioned to only one device, take VGA back */
/**
* amdgpu_device_vga_set_decode - enable/disable vga decode
@@ -3773,8 +3805,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
}
}
- pci_enable_pcie_error_reporting(adev->pdev);
-
/* Post card if necessary */
if (amdgpu_device_need_post(adev)) {
if (!adev->bios) {
@@ -3864,11 +3894,8 @@ fence_driver_init:
adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
r = amdgpu_pm_sysfs_init(adev);
- if (r) {
- adev->pm_sysfs_en = false;
- DRM_ERROR("registering pm debugfs failed (%d).\n", r);
- } else
- adev->pm_sysfs_en = true;
+ if (r)
+ DRM_ERROR("registering pm sysfs failed (%d).\n", r);
r = amdgpu_ucode_sysfs_init(adev);
if (r) {
@@ -3930,12 +3957,15 @@ fence_driver_init:
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
- if (amdgpu_device_supports_px(ddev)) {
- px = true;
+ px = amdgpu_device_supports_px(ddev);
+
+ if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
+ apple_gmux_detect(NULL, NULL)))
vga_switcheroo_register_client(adev->pdev,
&amdgpu_switcheroo_ops, px);
+
+ if (px)
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
- }
if (adev->gmc.xgmi.pending_reset)
queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
@@ -4011,7 +4041,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
if (adev->mman.initialized)
drain_workqueue(adev->mman.bdev.wq);
- if (adev->pm_sysfs_en)
+ if (adev->pm.sysfs_initialized)
amdgpu_pm_sysfs_fini(adev);
if (adev->ucode_sysfs_en)
amdgpu_ucode_sysfs_fini(adev);
@@ -4039,6 +4069,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
void amdgpu_device_fini_sw(struct amdgpu_device *adev)
{
int idx;
+ bool px;
amdgpu_fence_driver_sw_fini(adev);
amdgpu_device_ip_fini(adev);
@@ -4057,10 +4088,16 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
kfree(adev->bios);
adev->bios = NULL;
- if (amdgpu_device_supports_px(adev_to_drm(adev))) {
+
+ px = amdgpu_device_supports_px(adev_to_drm(adev));
+
+ if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
+ apple_gmux_detect(NULL, NULL)))
vga_switcheroo_unregister_client(adev->pdev);
+
+ if (px)
vga_switcheroo_fini_domain_pm_ops(adev->dev);
- }
+
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_unregister(adev->pdev);
@@ -4145,8 +4182,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
DRM_WARN("smart shift update failed\n");
- drm_kms_helper_poll_disable(dev);
-
if (fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
@@ -4243,8 +4278,6 @@ exit:
if (fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
- drm_kms_helper_poll_enable(dev);
-
amdgpu_ras_resume(adev);
if (adev->mode_info.num_crtc) {
@@ -5582,7 +5615,7 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
+ if (!amdgpu_device_supports_baco(dev))
return -ENOTSUPP;
if (ras && adev->ras_enabled &&
@@ -5598,7 +5631,7 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
int ret = 0;
- if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
+ if (!amdgpu_device_supports_baco(dev))
return -ENOTSUPP;
ret = amdgpu_dpm_baco_exit(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index b719852daa07..77a8b05d3868 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -41,6 +41,7 @@
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "sdma_v4_0.h"
+#include "sdma_v4_4_2.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
@@ -543,6 +544,7 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
struct harvest_table *harvest_info;
u16 offset;
int i;
+ uint32_t umc_harvest_config = 0;
bhdr = (struct binary_header *)adev->mman.discovery_bin;
offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
@@ -570,12 +572,17 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
break;
case UMC_HWID:
+ umc_harvest_config |=
+ 1 << (le16_to_cpu(harvest_info->list[i].number_instance));
(*umc_harvest_count)++;
break;
default:
break;
}
}
+
+ adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
+ ~umc_harvest_config;
}
/* ================================================== */
@@ -705,7 +712,7 @@ static void ip_hw_instance_release(struct kobject *kobj)
kfree(ip_hw_instance);
}
-static struct kobj_type ip_hw_instance_ktype = {
+static const struct kobj_type ip_hw_instance_ktype = {
.release = ip_hw_instance_release,
.sysfs_ops = &ip_hw_instance_sysfs_ops,
.default_groups = ip_hw_instance_groups,
@@ -724,7 +731,7 @@ static void ip_hw_id_release(struct kobject *kobj)
kfree(ip_hw_id);
}
-static struct kobj_type ip_hw_id_ktype = {
+static const struct kobj_type ip_hw_id_ktype = {
.release = ip_hw_id_release,
.sysfs_ops = &kobj_sysfs_ops,
};
@@ -787,18 +794,18 @@ static const struct sysfs_ops ip_die_entry_sysfs_ops = {
.show = ip_die_entry_attr_show,
};
-static struct kobj_type ip_die_entry_ktype = {
+static const struct kobj_type ip_die_entry_ktype = {
.release = ip_die_entry_release,
.sysfs_ops = &ip_die_entry_sysfs_ops,
.default_groups = ip_die_entry_groups,
};
-static struct kobj_type die_kobj_ktype = {
+static const struct kobj_type die_kobj_ktype = {
.release = die_kobj_release,
.sysfs_ops = &kobj_sysfs_ops,
};
-static struct kobj_type ip_discovery_ktype = {
+static const struct kobj_type ip_discovery_ktype = {
.release = ip_disc_release,
.sysfs_ops = &kobj_sysfs_ops,
};
@@ -1156,8 +1163,10 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
AMDGPU_MAX_SDMA_INSTANCES);
}
- if (le16_to_cpu(ip->hw_id) == UMC_HWID)
+ if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
adev->gmc.num_umc++;
+ adev->umc.node_inst_num++;
+ }
for (k = 0; k < num_base_address; k++) {
/*
@@ -1583,6 +1592,7 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(4, 2, 0):
case IP_VERSION(4, 2, 1):
case IP_VERSION(4, 4, 0):
+ case IP_VERSION(4, 4, 2):
amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
break;
case IP_VERSION(5, 0, 0):
@@ -1641,6 +1651,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 3):
case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 10):
@@ -1834,6 +1845,9 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(4, 4, 0):
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
break;
+ case IP_VERSION(4, 4, 2):
+ amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
+ break;
case IP_VERSION(5, 0, 0):
case IP_VERSION(5, 0, 1):
case IP_VERSION(5, 0, 2):
@@ -2296,6 +2310,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(4, 2, 0):
case IP_VERSION(4, 2, 1):
case IP_VERSION(4, 4, 0):
+ case IP_VERSION(4, 4, 2):
adev->hdp.funcs = &hdp_v4_0_funcs;
break;
case IP_VERSION(5, 0, 0):
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 503f89a766c3..d60fe7eb5579 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -1618,6 +1618,8 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
struct drm_connector_list_iter iter;
int r;
+ drm_kms_helper_poll_disable(dev);
+
/* turn off display hw */
drm_modeset_lock_all(dev);
drm_connector_list_iter_begin(dev, &iter);
@@ -1694,6 +1696,8 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev)
drm_modeset_unlock_all(dev);
+ drm_kms_helper_poll_enable(dev);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f5ffca24def4..ba5def374368 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2467,7 +2467,10 @@ static int amdgpu_pmops_freeze(struct device *dev)
adev->in_s4 = false;
if (r)
return r;
- return amdgpu_asic_reset(adev);
+
+ if (amdgpu_acpi_should_gpu_reset(adev))
+ return amdgpu_asic_reset(adev);
+ return 0;
}
static int amdgpu_pmops_thaw(struct device *dev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index 99a7855ab1bc..c57252f004e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -60,12 +60,13 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
struct amdgpu_fpriv *fpriv = file->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
- uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0;
+ struct amdgpu_mem_stats stats;
ktime_t usage[AMDGPU_HW_IP_NUM];
uint32_t bus, dev, fn, domain;
unsigned int hw_ip;
int ret;
+ memset(&stats, 0, sizeof(stats));
bus = adev->pdev->bus->number;
domain = pci_domain_nr(adev->pdev->bus);
dev = PCI_SLOT(adev->pdev->devfn);
@@ -75,7 +76,7 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
if (ret)
return;
- amdgpu_vm_get_memory(vm, &vram_mem, &gtt_mem, &cpu_mem);
+ amdgpu_vm_get_memory(vm, &stats);
amdgpu_bo_unreserve(vm->root.bo);
amdgpu_ctx_mgr_usage(&fpriv->ctx_mgr, usage);
@@ -90,9 +91,22 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
seq_printf(m, "drm-driver:\t%s\n", file->minor->dev->driver->name);
seq_printf(m, "drm-pdev:\t%04x:%02x:%02x.%d\n", domain, bus, dev, fn);
seq_printf(m, "drm-client-id:\t%Lu\n", vm->immediate.fence_context);
- seq_printf(m, "drm-memory-vram:\t%llu KiB\n", vram_mem/1024UL);
- seq_printf(m, "drm-memory-gtt: \t%llu KiB\n", gtt_mem/1024UL);
- seq_printf(m, "drm-memory-cpu: \t%llu KiB\n", cpu_mem/1024UL);
+ seq_printf(m, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL);
+ seq_printf(m, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL);
+ seq_printf(m, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL);
+ seq_printf(m, "amd-memory-visible-vram:\t%llu KiB\n",
+ stats.visible_vram/1024UL);
+ seq_printf(m, "amd-evicted-vram:\t%llu KiB\n",
+ stats.evicted_vram/1024UL);
+ seq_printf(m, "amd-evicted-visible-vram:\t%llu KiB\n",
+ stats.evicted_visible_vram/1024UL);
+ seq_printf(m, "amd-requested-vram:\t%llu KiB\n",
+ stats.requested_vram/1024UL);
+ seq_printf(m, "amd-requested-visible-vram:\t%llu KiB\n",
+ stats.requested_visible_vram/1024UL);
+ seq_printf(m, "amd-requested-gtt:\t%llu KiB\n",
+ stats.requested_gtt/1024UL);
+
for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
if (!usage[hw_ip])
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index faff4a3f96e6..f52d0ba91a77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -678,6 +678,15 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
ptr = &ring->fence_drv.fences[i];
old = rcu_dereference_protected(*ptr, 1);
if (old && old->ops == &amdgpu_job_fence_ops) {
+ struct amdgpu_job *job;
+
+ /* For non-scheduler bad job, i.e. failed ib test, we need to signal
+ * it right here or we won't be able to track them in fence_drv
+ * and they will remain unsignaled during sa_bo free.
+ */
+ job = container_of(old, struct amdgpu_job, hw_fence);
+ if (!job->base.s_fence && !dma_fence_is_signaled(old))
+ dma_fence_signal(old);
RCU_INIT_POINTER(*ptr, NULL);
dma_fence_put(old);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 35ed46b9249c..c50d59855011 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -725,7 +725,7 @@ int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
/* If not define special ras_late_init function, use gfx default ras_late_init */
if (!ras->ras_block.ras_late_init)
- ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+ ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;
/* If not defined special ras_cb function, use default ras_cb */
if (!ras->ras_block.ras_cb)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 12a6826caef4..655fc8bf936d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -447,13 +447,42 @@ void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
} while (fault->timestamp < tmp);
}
-int amdgpu_gmc_ras_early_init(struct amdgpu_device *adev)
+int amdgpu_gmc_ras_sw_init(struct amdgpu_device *adev)
{
- if (!adev->gmc.xgmi.connected_to_cpu) {
- adev->gmc.xgmi.ras = &xgmi_ras;
- amdgpu_ras_register_ras_block(adev, &adev->gmc.xgmi.ras->ras_block);
- adev->gmc.xgmi.ras_if = &adev->gmc.xgmi.ras->ras_block.ras_comm;
- }
+ int r;
+
+ /* umc ras block */
+ r = amdgpu_umc_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ /* mmhub ras block */
+ r = amdgpu_mmhub_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ /* hdp ras block */
+ r = amdgpu_hdp_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ /* mca.x ras block */
+ r = amdgpu_mca_mp0_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_mca_mp1_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_mca_mpio_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ /* xgmi ras block */
+ r = amdgpu_xgmi_ras_sw_init(adev);
+ if (r)
+ return r;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 0305b660cd17..232523e3e270 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -104,6 +104,8 @@ struct amdgpu_vmhub {
uint32_t vm_cntx_cntl_vm_fault;
uint32_t vm_l2_bank_select_reserved_cid2;
+ uint32_t vm_contexts_disable;
+
const struct amdgpu_vmhub_funcs *vmhub_funcs;
};
@@ -351,7 +353,7 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev,
uint16_t pasid, uint64_t timestamp);
void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
uint16_t pasid);
-int amdgpu_gmc_ras_early_init(struct amdgpu_device *adev);
+int amdgpu_gmc_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
new file mode 100644
index 000000000000..b6cf801939aa
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+
+int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_hdp_ras *ras;
+
+ if (!adev->hdp.ras)
+ return 0;
+
+ ras = adev->hdp.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register hdp ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "hdp");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__HDP;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->hdp.ras_if = &ras->ras_block.ras_comm;
+
+ /* hdp ras follows amdgpu_ras_block_late_init_default for late init */
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
index ac5c61d3de2b..7b8a6152dc8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
@@ -43,5 +43,5 @@ struct amdgpu_hdp {
struct amdgpu_hdp_ras *ras;
};
-int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
+int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev);
#endif /* __AMDGPU_HDP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 6f81ed4fb0d9..479d9bcc99ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -236,19 +236,28 @@ int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
return 0;
}
-void jpeg_set_ras_funcs(struct amdgpu_device *adev)
+int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev)
{
+ int err;
+ struct amdgpu_jpeg_ras *ras;
+
if (!adev->jpeg.ras)
- return;
+ return 0;
- amdgpu_ras_register_ras_block(adev, &adev->jpeg.ras->ras_block);
+ ras = adev->jpeg.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register jpeg ras block!\n");
+ return err;
+ }
- strcpy(adev->jpeg.ras->ras_block.ras_comm.name, "jpeg");
- adev->jpeg.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__JPEG;
- adev->jpeg.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
- adev->jpeg.ras_if = &adev->jpeg.ras->ras_block.ras_comm;
+ strcpy(ras->ras_block.ras_comm.name, "jpeg");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__JPEG;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
+ adev->jpeg.ras_if = &ras->ras_block.ras_comm;
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->jpeg.ras->ras_block.ras_late_init)
- adev->jpeg.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+ if (!ras->ras_block.ras_late_init)
+ ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+
+ return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index e8ca3e32ad52..0ca76f0f23e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -72,6 +72,6 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
-void jpeg_set_ras_funcs(struct amdgpu_device *adev);
+int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev);
#endif /*__AMDGPU_JPEG_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index 51c2a82e2fa4..8d9ff9e151de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -70,3 +70,75 @@ void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
amdgpu_mca_reset_error_count(adev, mc_status_addr);
}
+
+int amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_mca_ras_block *ras;
+
+ if (!adev->mca.mp0.ras)
+ return 0;
+
+ ras = adev->mca.mp0.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register mca.mp0 ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "mca.mp0");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->mca.mp0.ras_if = &ras->ras_block.ras_comm;
+
+ return 0;
+}
+
+int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_mca_ras_block *ras;
+
+ if (!adev->mca.mp1.ras)
+ return 0;
+
+ ras = adev->mca.mp1.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register mca.mp1 ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "mca.mp1");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->mca.mp1.ras_if = &ras->ras_block.ras_comm;
+
+ return 0;
+}
+
+int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_mca_ras_block *ras;
+
+ if (!adev->mca.mpio.ras)
+ return 0;
+
+ ras = adev->mca.mpio.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register mca.mpio ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "mca.mpio");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->mca.mpio.ras_if = &ras->ras_block.ras_comm;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
index 7ce16d16e34b..997a073e2409 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
@@ -30,12 +30,7 @@ struct amdgpu_mca_ras {
struct amdgpu_mca_ras_block *ras;
};
-struct amdgpu_mca_funcs {
- void (*init)(struct amdgpu_device *adev);
-};
-
struct amdgpu_mca {
- const struct amdgpu_mca_funcs *funcs;
struct amdgpu_mca_ras mp0;
struct amdgpu_mca_ras mp1;
struct amdgpu_mca_ras mpio;
@@ -55,5 +50,7 @@ void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr,
void *ras_error_status);
-
+int amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
new file mode 100644
index 000000000000..0f6b1021fef3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+
+int amdgpu_mmhub_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_mmhub_ras *ras;
+
+ if (!adev->mmhub.ras)
+ return 0;
+
+ ras = adev->mmhub.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register mmhub ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "mmhub");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MMHUB;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->mmhub.ras_if = &ras->ras_block.ras_comm;
+
+ /* mmhub ras follows amdgpu_ras_block_late_init_default for late init */
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index 93430d3823c9..d21bb6dae56e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -48,5 +48,7 @@ struct amdgpu_mmhub {
struct amdgpu_mmhub_ras *ras;
};
+int amdgpu_mmhub_ras_sw_init(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
index 37d779b8e4a6..a3bc00577a7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -22,6 +22,29 @@
#include "amdgpu.h"
#include "amdgpu_ras.h"
+int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_nbio_ras *ras;
+
+ if (!adev->nbio.ras)
+ return 0;
+
+ ras = adev->nbio.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register pcie_bif ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "pcie_bif");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__PCIE_BIF;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->nbio.ras_if = &ras->ras_block.ras_comm;
+
+ return 0;
+}
+
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index a240336bbc6b..c686ff4bcc39 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -106,5 +106,6 @@ struct amdgpu_nbio {
struct amdgpu_nbio_ras *ras;
};
+int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 858d881ab9a1..2bd1a54ee866 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1265,24 +1265,41 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
-void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
- uint64_t *gtt_mem, uint64_t *cpu_mem)
+void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
+ struct amdgpu_mem_stats *stats)
{
unsigned int domain;
+ uint64_t size = amdgpu_bo_size(bo);
domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
- *vram_mem += amdgpu_bo_size(bo);
+ stats->vram += size;
+ if (amdgpu_bo_in_cpu_visible_vram(bo))
+ stats->visible_vram += size;
break;
case AMDGPU_GEM_DOMAIN_GTT:
- *gtt_mem += amdgpu_bo_size(bo);
+ stats->gtt += size;
break;
case AMDGPU_GEM_DOMAIN_CPU:
default:
- *cpu_mem += amdgpu_bo_size(bo);
+ stats->cpu += size;
break;
}
+
+ if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) {
+ stats->requested_vram += size;
+ if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+ stats->requested_visible_vram += size;
+
+ if (domain != AMDGPU_GEM_DOMAIN_VRAM) {
+ stats->evicted_vram += size;
+ if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+ stats->evicted_visible_vram += size;
+ }
+ } else if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_GTT) {
+ stats->requested_gtt += size;
+ }
}
/**
@@ -1315,7 +1332,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
- adev->in_suspend || adev->shutdown)
+ adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
return;
if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 5a85726ce853..35b8106816a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -126,6 +126,27 @@ struct amdgpu_bo_vm {
struct amdgpu_vm_bo_base entries[];
};
+struct amdgpu_mem_stats {
+ /* current VRAM usage, includes visible VRAM */
+ uint64_t vram;
+ /* current visible VRAM usage */
+ uint64_t visible_vram;
+ /* current GTT usage */
+ uint64_t gtt;
+ /* current system memory usage */
+ uint64_t cpu;
+ /* sum of evicted buffers, includes visible VRAM */
+ uint64_t evicted_vram;
+ /* sum of evicted buffers due to CPU access */
+ uint64_t evicted_visible_vram;
+ /* how much userspace asked for, includes vis.VRAM */
+ uint64_t requested_vram;
+ /* how much userspace asked for */
+ uint64_t requested_visible_vram;
+ /* how much userspace asked for */
+ uint64_t requested_gtt;
+};
+
static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
return container_of(tbo, struct amdgpu_bo, tbo);
@@ -325,8 +346,8 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
-void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
- uint64_t *gtt_mem, uint64_t *cpu_mem);
+void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
+ struct amdgpu_mem_stats *stats);
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
struct dma_fence **fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 28fe6d941054..02f948adae72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -191,6 +191,7 @@ static int psp_early_init(void *handle)
psp_v12_0_set_psp_funcs(psp);
break;
case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 6):
psp_v13_0_set_psp_funcs(psp);
break;
case IP_VERSION(13, 0, 1):
@@ -602,27 +603,14 @@ psp_cmd_submit_buf(struct psp_context *psp,
struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
{
int ret;
- int index, idx;
+ int index;
int timeout = 20000;
bool ras_intr = false;
bool skip_unsupport = false;
- bool dev_entered;
if (psp->adev->no_hw_access)
return 0;
- dev_entered = drm_dev_enter(adev_to_drm(psp->adev), &idx);
- /*
- * We allow sending PSP messages LOAD_ASD and UNLOAD_TA without acquiring
- * a lock in drm_dev_enter during driver unload because we must call
- * drm_dev_unplug as the beginning of unload driver sequence . It is very
- * crucial that userspace can't access device instances anymore.
- */
- if (!dev_entered)
- WARN_ON(psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_LOAD_ASD &&
- psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_UNLOAD_TA &&
- psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_INVOKE_CMD);
-
memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
@@ -686,8 +674,6 @@ psp_cmd_submit_buf(struct psp_context *psp,
}
exit:
- if (dev_entered)
- drm_dev_exit(idx);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 63dfcc98152d..11df6ee052b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -2554,21 +2554,24 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
/* initialize nbio ras function ahead of any other
* ras functions so hardware fatal error interrupt
* can be enabled as early as possible */
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
- case CHIP_ALDEBARAN:
- if (!adev->gmc.xgmi.connected_to_cpu) {
+ switch (adev->ip_versions[NBIO_HWIP][0]) {
+ case IP_VERSION(7, 4, 0):
+ case IP_VERSION(7, 4, 1):
+ case IP_VERSION(7, 4, 4):
+ if (!adev->gmc.xgmi.connected_to_cpu)
adev->nbio.ras = &nbio_v7_4_ras;
- amdgpu_ras_register_ras_block(adev, &adev->nbio.ras->ras_block);
- adev->nbio.ras_if = &adev->nbio.ras->ras_block.ras_comm;
- }
break;
default:
/* nbio ras is not available */
break;
}
+ /* nbio ras block needs to be enabled ahead of other ras blocks
+ * to handle fatal error */
+ r = amdgpu_nbio_ras_sw_init(adev);
+ if (r)
+ return r;
+
if (adev->nbio.ras &&
adev->nbio.ras->init_ras_controller_interrupt) {
r = adev->nbio.ras->init_ras_controller_interrupt(adev);
@@ -3073,9 +3076,6 @@ int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
if (!adev || !ras_block_obj)
return -EINVAL;
- if (!amdgpu_ras_asic_supported(adev))
- return 0;
-
ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
if (!ras_node)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 1b8574bc4463..da68ceaa024c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -208,6 +208,36 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true);
}
+int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_umc_ras *ras;
+
+ if (!adev->umc.ras)
+ return 0;
+
+ ras = adev->umc.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register umc ras block!\n");
+ return err;
+ }
+
+ strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->umc.ras_if = &ras->ras_block.ras_comm;
+
+ if (!ras->ras_block.ras_late_init)
+ ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
+
+ if (!ras->ras_block.ras_cb)
+ ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
+
+ return 0;
+}
+
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;
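
The new amdgpu_umc_ras_sw_init() above follows the pattern shared by the other *_ras_sw_init() helpers introduced in this series: register the RAS block, fill in ras_comm, then install the generic callbacks only where the ASIC-specific code has not already supplied its own. A hedged, stand-alone sketch of that "fill defaults only if unset" idiom follows; all names are invented for illustration.

#include <stdio.h>

struct ras_block {
	int (*late_init)(void);   /* optional ASIC-specific override */
	int (*error_cb)(void);    /* optional ASIC-specific override */
};

static int default_late_init(void) { puts("default late_init"); return 0; }
static int default_error_cb(void)  { puts("default error_cb");  return 0; }

/* install defaults only for callbacks that were left NULL */
static void ras_sw_init(struct ras_block *blk)
{
	if (!blk->late_init)
		blk->late_init = default_late_init;
	if (!blk->error_cb)
		blk->error_cb = default_error_cb;
}

int main(void)
{
	struct ras_block blk = { 0 };

	ras_sw_init(&blk);
	blk.late_init();
	blk.error_cb();
	return 0;
}
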
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index f2bf979af588..d7f1229ff11f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -42,7 +42,7 @@
#define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))
#define LOOP_UMC_NODE_INST(node_inst) \
- for ((node_inst) = 0; (node_inst) < adev->umc.node_inst_num; (node_inst)++)
+ for_each_set_bit((node_inst), &(adev->umc.active_mask), adev->umc.node_inst_num)
#define LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) \
LOOP_UMC_NODE_INST((node_inst)) LOOP_UMC_INST_AND_CH((umc_inst), (ch_inst))
@@ -69,7 +69,7 @@ struct amdgpu_umc {
/* number of umc instance with memory map register access */
uint32_t umc_inst_num;
- /*number of umc node instance with memory map register access*/
+ /* total number of umc node instances, including harvested ones */
uint32_t node_inst_num;
/* UMC register per channel offset */
@@ -82,8 +82,12 @@ struct amdgpu_umc {
const struct amdgpu_umc_funcs *funcs;
struct amdgpu_umc_ras *ras;
+
+ /* active mask for umc node instance */
+ unsigned long active_mask;
};
+int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset);
int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
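
The LOOP_UMC_NODE_INST change above switches from iterating 0..node_inst_num-1 to walking only the bits set in the new active_mask, so harvested UMC node instances are skipped. The kernel uses for_each_set_bit() from <linux/find.h> for this; the loop below is only a stand-alone approximation of the same behaviour with made-up values.

#include <stdio.h>

int main(void)
{
	/* pretend nodes 0, 1 and 3 exist and node 2 is harvested */
	unsigned long active_mask = 0x0b;
	unsigned int node_inst_num = 4;
	unsigned int node;

	for (node = 0; node < node_inst_num; node++) {
		if (!(active_mask & (1UL << node)))
			continue;	/* harvested instance, skip it */
		printf("probing umc node %u\n", node);
	}
	return 0;
}
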
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 632a6ded5735..6887109abb13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1118,14 +1118,11 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
{
struct amdgpu_device *adev = ring->adev;
struct dma_fence *f = NULL;
+ uint32_t offset, data[4];
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- uint32_t data[4];
uint64_t addr;
- long r;
- int i;
- unsigned offset_idx = 0;
- unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
+ int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
@@ -1134,16 +1131,15 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (r)
return r;
- if (adev->asic_type >= CHIP_VEGA10) {
- offset_idx = 1 + ring->me;
- offset[1] = adev->reg_offset[UVD_HWIP][0][1];
- offset[2] = adev->reg_offset[UVD_HWIP][1][1];
- }
+ if (adev->asic_type >= CHIP_VEGA10)
+ offset = adev->reg_offset[UVD_HWIP][ring->me][1];
+ else
+ offset = UVD_BASE_SI;
- data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0);
- data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0);
- data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0);
- data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
+ data[0] = PACKET0(offset + UVD_GPCOM_VCPU_DATA0, 0);
+ data[1] = PACKET0(offset + UVD_GPCOM_VCPU_DATA1, 0);
+ data[2] = PACKET0(offset + UVD_GPCOM_VCPU_CMD, 0);
+ data[3] = PACKET0(offset + UVD_NO_OP, 0);
ib = &job->ibs[0];
addr = amdgpu_bo_gpu_offset(bo);
@@ -1160,14 +1156,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16;
if (direct) {
- r = dma_resv_wait_timeout(bo->tbo.base.resv,
- DMA_RESV_USAGE_KERNEL, false,
- msecs_to_jiffies(10));
- if (r == 0)
- r = -ETIMEDOUT;
- if (r < 0)
- goto err_free;
-
r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err_free;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 25217b05c0ea..e63fcc58e8e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -26,6 +26,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
+#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <drm/drm_drv.h>
@@ -114,6 +115,24 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
+ /*
+ * Some Steam Deck BIOS versions are incompatible with the
+ * indirect SRAM mode, leading to amdgpu being unable to get
+ * properly probed (and even potentially crashing the kernel).
+ * Hence, check for these versions here - notice this is
+ * restricted to Vangogh (Deck's APU).
+ */
+ if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 2)) {
+ const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
+
+ if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
+ !strncmp("F7A0114", bios_ver, 7))) {
+ adev->vcn.indirect_sram = false;
+ dev_info(adev->dev,
+ "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
+ }
+ }
+
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
@@ -1162,19 +1181,28 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
return 0;
}
-void amdgpu_vcn_set_ras_funcs(struct amdgpu_device *adev)
+int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
{
+ int err;
+ struct amdgpu_vcn_ras *ras;
+
if (!adev->vcn.ras)
- return;
+ return 0;
+
+ ras = adev->vcn.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register vcn ras block!\n");
+ return err;
+ }
- amdgpu_ras_register_ras_block(adev, &adev->vcn.ras->ras_block);
+ strcpy(ras->ras_block.ras_comm.name, "vcn");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
+ adev->vcn.ras_if = &ras->ras_block.ras_comm;
- strcpy(adev->vcn.ras->ras_block.ras_comm.name, "vcn");
- adev->vcn.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
- adev->vcn.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
- adev->vcn.ras_if = &adev->vcn.ras->ras_block.ras_comm;
+ if (!ras->ras_block.ras_late_init)
+ ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->vcn.ras->ras_block.ras_late_init)
- adev->vcn.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+ return 0;
}
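
The Steam Deck hunk in amdgpu_vcn_sw_init() above keys the quirk off the DMI BIOS version string. Outside the kernel there is no dmi_get_system_info(), but the prefix match itself is plain strncmp(); the sketch below reuses the BIOS strings quoted in the hunk, while the helper name and structure are invented for illustration.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* true when the reported BIOS version matches a known-bad prefix */
static bool needs_indirect_sram_quirk(const char *bios_ver)
{
	static const char *bad_prefixes[] = { "F7A0113", "F7A0114" };
	size_t i;

	if (!bios_ver)
		return false;
	for (i = 0; i < sizeof(bad_prefixes) / sizeof(bad_prefixes[0]); i++)
		if (!strncmp(bad_prefixes[i], bios_ver, 7))
			return true;
	return false;
}

int main(void)
{
	printf("%d\n", needs_indirect_sram_quirk("F7A0113 something"));
	printf("%d\n", needs_indirect_sram_quirk("F7A0120"));
	return 0;
}
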
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index d3e2af902907..c730949ece7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -400,6 +400,6 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev,
int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
-void amdgpu_vcn_set_ras_funcs(struct amdgpu_device *adev);
+int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index b9e9480448af..4f7bab52282a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -124,6 +124,8 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
/* Indirect Reg Access enabled */
AMDGIM_FEATURE_INDIRECT_REG_ACCESS = (1 << 5),
+ /* AV1 Support MODE*/
+ AMDGIM_FEATURE_AV1_SUPPORT = (1 << 6),
};
enum AMDGIM_REG_ACCESS_FLAG {
@@ -322,6 +324,8 @@ static inline bool is_virtual_machine(void)
((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
#define amdgpu_sriov_is_normal(adev) \
((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug))
+#define amdgpu_sriov_is_av1_support(adev) \
+ ((adev)->virt.gim_feature & AMDGIM_FEATURE_AV1_SUPPORT)
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b9441ab457ea..286e326bb4bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -867,6 +867,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
pages_addr[idx - 1] + PAGE_SIZE))
break;
}
+ if (!contiguous)
+ count--;
num_entries = count *
AMDGPU_GPU_PAGES_IN_CPU_PAGE;
}
@@ -918,8 +920,8 @@ error_unlock:
return r;
}
-void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
- uint64_t *gtt_mem, uint64_t *cpu_mem)
+void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
+ struct amdgpu_mem_stats *stats)
{
struct amdgpu_bo_va *bo_va, *tmp;
@@ -927,41 +929,36 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
if (!bo_va->base.bo)
continue;
- amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
- gtt_mem, cpu_mem);
+ amdgpu_bo_get_memory(bo_va->base.bo, stats);
}
list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
if (!bo_va->base.bo)
continue;
- amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
- gtt_mem, cpu_mem);
+ amdgpu_bo_get_memory(bo_va->base.bo, stats);
}
list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
if (!bo_va->base.bo)
continue;
- amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
- gtt_mem, cpu_mem);
+ amdgpu_bo_get_memory(bo_va->base.bo, stats);
}
list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
if (!bo_va->base.bo)
continue;
- amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
- gtt_mem, cpu_mem);
+ amdgpu_bo_get_memory(bo_va->base.bo, stats);
}
list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
if (!bo_va->base.bo)
continue;
- amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
- gtt_mem, cpu_mem);
+ amdgpu_bo_get_memory(bo_va->base.bo, stats);
}
list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
if (!bo_va->base.bo)
continue;
- amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
- gtt_mem, cpu_mem);
+ amdgpu_bo_get_memory(bo_va->base.bo, stats);
}
spin_unlock(&vm->status_lock);
}
+
/**
* amdgpu_vm_bo_update - update all BO mappings in the vm page table
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 856a64bc7a89..6f085f0b4ef3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -40,6 +40,7 @@ struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;
struct amdgpu_bo_vm;
+struct amdgpu_mem_stats;
/*
* GPUVM handling
@@ -457,8 +458,8 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
-void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
- uint64_t *gtt_mem, uint64_t *cpu_mem);
+void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
+ struct amdgpu_mem_stats *stats);
int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_vm *vmbo, bool immediate);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 4340d08f7607..3fe24348d199 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -308,7 +308,7 @@ static const struct sysfs_ops amdgpu_xgmi_hive_ops = {
.show = amdgpu_xgmi_show_attrs,
};
-struct kobj_type amdgpu_xgmi_hive_type = {
+static const struct kobj_type amdgpu_xgmi_hive_type = {
.release = amdgpu_xgmi_hive_release,
.sysfs_ops = &amdgpu_xgmi_hive_ops,
.default_groups = amdgpu_xgmi_hive_groups,
@@ -1048,12 +1048,30 @@ struct amdgpu_ras_block_hw_ops xgmi_ras_hw_ops = {
struct amdgpu_xgmi_ras xgmi_ras = {
.ras_block = {
- .ras_comm = {
- .name = "xgmi_wafl",
- .block = AMDGPU_RAS_BLOCK__XGMI_WAFL,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- },
.hw_ops = &xgmi_ras_hw_ops,
.ras_late_init = amdgpu_xgmi_ras_late_init,
},
};
+
+int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_xgmi_ras *ras;
+
+ if (!adev->gmc.xgmi.ras)
+ return 0;
+
+ ras = adev->gmc.xgmi.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register xgmi_wafl_pcs ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "xgmi_wafl_pcs");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->gmc.xgmi.ras_if = &ras->ras_block.ras_comm;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 30dcc1681b4e..86fbf56938f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -73,5 +73,6 @@ static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
adev->gmc.xgmi.hive_id &&
adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
}
+int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index 6c97148ca0ed..24d42d24e6a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -93,7 +93,8 @@ union amd_sriov_msg_feature_flags {
uint32_t mm_bw_management : 1;
uint32_t pp_one_vf_mode : 1;
uint32_t reg_indirect_acc : 1;
- uint32_t reserved : 26;
+ uint32_t av1_support : 1;
+ uint32_t reserved : 25;
} flags;
uint32_t all;
};
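
The amdgv_sriovmsg.h hunk above carves one more bit (av1_support) out of the reserved field of the host/guest feature-flag union; the guest side mirrors it as AMDGIM_FEATURE_AV1_SUPPORT and tests it via the amdgpu_sriov_is_av1_support() macro added earlier in this diff. A stand-alone sketch of that bitfield-plus-raw-word union pattern is below; the field names and widths are simplified, and bitfield layout is of course implementation-defined, so this is for illustration only.

#include <stdint.h>
#include <stdio.h>

union feature_flags {
	struct {
		uint32_t error_log   : 1;
		uint32_t av1_support : 1;
		uint32_t reserved    : 30;
	} flags;
	uint32_t all;	/* the same 32 bits viewed as one word */
};

int main(void)
{
	union feature_flags f = { .all = 0 };

	f.flags.av1_support = 1;
	printf("raw word: 0x%08x, av1: %u\n",
	       (unsigned)f.all, (unsigned)f.flags.av1_support);
	return 0;
}
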
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 6983acc456b2..516409989235 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -7266,7 +7266,6 @@ static int gfx_v10_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- uint32_t tmp;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -7285,17 +7284,9 @@ static int gfx_v10_0_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev)) {
gfx_v10_0_cp_gfx_enable(adev, false);
- /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
- if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) {
- tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
- tmp &= 0xffffff00;
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp);
- } else {
- tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
- tmp &= 0xffffff00;
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
- }
-
+ /* Skip clearing the KIQ position here; doing so causes a GFX
+ * hang when another Windows guest is rendering.
+ */
return 0;
}
gfx_v10_0_cp_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 3bf697a80cf2..ecf8ceb53311 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1287,6 +1287,11 @@ static int gfx_v11_0_sw_init(void *handle)
break;
}
+ /* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3) &&
+ amdgpu_sriov_is_pp_one_vf(adev))
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG;
+
/* EOP Event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
@@ -4655,6 +4660,14 @@ static bool gfx_v11_0_check_soft_reset(void *handle)
return false;
}
+static int gfx_v11_0_post_soft_reset(void *handle)
+{
+ /*
+ * GFX soft reset impacts MES, so MES must be resumed after a GFX soft reset.
+ */
+ return amdgpu_mes_resume((struct amdgpu_device *)handle);
+}
+
static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
@@ -6166,6 +6179,7 @@ static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
.wait_for_idle = gfx_v11_0_wait_for_idle,
.soft_reset = gfx_v11_0_soft_reset,
.check_soft_reset = gfx_v11_0_check_soft_reset,
+ .post_soft_reset = gfx_v11_0_post_soft_reset,
.set_clockgating_state = gfx_v11_0_set_clockgating_state,
.set_powergating_state = gfx_v11_0_set_powergating_state,
.get_clockgating_state = gfx_v11_0_get_clockgating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index ab2556ca984e..d99821692ba3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -699,25 +699,8 @@ static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
default:
break;
}
- if (adev->umc.ras) {
- amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
-
- strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
- adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
- adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;
-
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->umc.ras->ras_block.ras_late_init)
- adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
-
- /* If not defined special ras_cb function, use default ras_cb */
- if (!adev->umc.ras->ras_block.ras_cb)
- adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
- }
}
-
static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
switch (adev->ip_versions[MMHUB_HWIP][0]) {
@@ -754,7 +737,6 @@ static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
static int gmc_v10_0_early_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gmc_v10_0_set_mmhub_funcs(adev);
@@ -770,10 +752,6 @@ static int gmc_v10_0_early_init(void *handle)
adev->gmc.private_aperture_end =
adev->gmc.private_aperture_start + (4ULL << 30) - 1;
- r = amdgpu_gmc_ras_early_init(adev);
- if (r)
- return r;
-
return 0;
}
@@ -1024,6 +1002,10 @@ static int gmc_v10_0_sw_init(void *handle)
amdgpu_vm_manager_init(adev);
+ r = amdgpu_gmc_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 85e0afc3d4f7..fad199ed15f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -567,7 +567,6 @@ static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
case IP_VERSION(8, 10, 0):
adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM;
- adev->umc.node_inst_num = adev->gmc.num_umc;
adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET;
adev->umc.retire_unit = UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM;
@@ -582,23 +581,6 @@ static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
default:
break;
}
-
- if (adev->umc.ras) {
- amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
-
- strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
- adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
- adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;
-
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->umc.ras->ras_block.ras_late_init)
- adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
-
- /* If not define special ras_cb function, use default ras_cb */
- if (!adev->umc.ras->ras_block.ras_cb)
- adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
- }
}
@@ -847,6 +829,10 @@ static int gmc_v11_0_sw_init(void *handle)
amdgpu_vm_manager_init(adev);
+ r = amdgpu_gmc_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -876,6 +862,12 @@ static int gmc_v11_0_sw_fini(void *handle)
static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
+ if (amdgpu_sriov_vf(adev)) {
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+
+ WREG32(hub->vm_contexts_disable, 0);
+ return;
+ }
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index b06170c00dfc..2a8dc9b52c2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1318,23 +1318,6 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
default:
break;
}
-
- if (adev->umc.ras) {
- amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
-
- strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
- adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
- adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;
-
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->umc.ras->ras_block.ras_late_init)
- adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
-
- /* If not defined special ras_cb function, use default ras_cb */
- if (!adev->umc.ras->ras_block.ras_cb)
- adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
- }
}
static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
@@ -1368,15 +1351,6 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
/* mmhub ras is not available */
break;
}
-
- if (adev->mmhub.ras) {
- amdgpu_ras_register_ras_block(adev, &adev->mmhub.ras->ras_block);
-
- strcpy(adev->mmhub.ras->ras_block.ras_comm.name, "mmhub");
- adev->mmhub.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MMHUB;
- adev->mmhub.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->mmhub.ras_if = &adev->mmhub.ras->ras_block.ras_comm;
- }
}
static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
@@ -1387,26 +1361,34 @@ static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
{
adev->hdp.ras = &hdp_v4_0_ras;
- amdgpu_ras_register_ras_block(adev, &adev->hdp.ras->ras_block);
- adev->hdp.ras_if = &adev->hdp.ras->ras_block.ras_comm;
}
-static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev)
+static void gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device *adev)
{
+ struct amdgpu_mca *mca = &adev->mca;
+
/* is UMC the right IP to check for MCA? Maybe DF? */
switch (adev->ip_versions[UMC_HWIP][0]) {
case IP_VERSION(6, 7, 0):
- if (!adev->gmc.xgmi.connected_to_cpu)
- adev->mca.funcs = &mca_v3_0_funcs;
+ if (!adev->gmc.xgmi.connected_to_cpu) {
+ mca->mp0.ras = &mca_v3_0_mp0_ras;
+ mca->mp1.ras = &mca_v3_0_mp1_ras;
+ mca->mpio.ras = &mca_v3_0_mpio_ras;
+ }
break;
default:
break;
}
}
+static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev)
+{
+ if (!adev->gmc.xgmi.connected_to_cpu)
+ adev->gmc.xgmi.ras = &xgmi_ras;
+}
+
static int gmc_v9_0_early_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* ARCT and VEGA20 don't have XGMI defined in their IP discovery tables */
@@ -1427,7 +1409,8 @@ static int gmc_v9_0_early_init(void *handle)
gmc_v9_0_set_mmhub_ras_funcs(adev);
gmc_v9_0_set_gfxhub_funcs(adev);
gmc_v9_0_set_hdp_ras_funcs(adev);
- gmc_v9_0_set_mca_funcs(adev);
+ gmc_v9_0_set_mca_ras_funcs(adev);
+ gmc_v9_0_set_xgmi_ras_funcs(adev);
adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
adev->gmc.shared_aperture_end =
@@ -1436,10 +1419,6 @@ static int gmc_v9_0_early_init(void *handle)
adev->gmc.private_aperture_end =
adev->gmc.private_aperture_start + (4ULL << 30) - 1;
- r = amdgpu_gmc_ras_early_init(adev);
- if (r)
- return r;
-
return 0;
}
@@ -1644,8 +1623,6 @@ static int gmc_v9_0_sw_init(void *handle)
adev->gfxhub.funcs->init(adev);
adev->mmhub.funcs->init(adev);
- if (adev->mca.funcs)
- adev->mca.funcs->init(adev);
spin_lock_init(&adev->gmc.invalidate_lock);
@@ -1798,6 +1775,10 @@ static int gmc_v9_0_sw_init(void *handle)
gmc_v9_0_save_registers(adev);
+ r = amdgpu_gmc_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index adf89680f53e..71d1a2e3bac9 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -49,7 +49,8 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
- if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0))
+ if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0) ||
+ adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 2))
return;
if (!ring || !ring->funcs->emit_wreg)
@@ -160,11 +161,6 @@ struct amdgpu_ras_block_hw_ops hdp_v4_0_ras_hw_ops = {
struct amdgpu_hdp_ras hdp_v4_0_ras = {
.ras_block = {
- .ras_comm = {
- .name = "hdp",
- .block = AMDGPU_RAS_BLOCK__HDP,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- },
.hw_ops = &hdp_v4_0_ras_hw_ops,
},
};
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index f2b743a93915..6b1887808782 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -138,6 +138,10 @@ static int jpeg_v2_5_sw_init(void *handle)
adev->jpeg.inst[i].external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH);
}
+ r = amdgpu_jpeg_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -806,6 +810,4 @@ static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev)
default:
break;
}
-
- jpeg_set_ras_funcs(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index 3beb731b2ce5..3129094baccc 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -113,6 +113,10 @@ static int jpeg_v4_0_sw_init(void *handle)
adev->jpeg.internal.jpeg_pitch = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);
+ r = amdgpu_jpeg_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -685,6 +689,4 @@ static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev)
default:
break;
}
-
- jpeg_set_ras_funcs(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
index d4bd7d1d2649..6dae4a2e2767 100644
--- a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
@@ -51,19 +51,13 @@ static int mca_v3_0_ras_block_match(struct amdgpu_ras_block_object *block_obj,
return -EINVAL;
}
-const struct amdgpu_ras_block_hw_ops mca_v3_0_mp0_hw_ops = {
+static const struct amdgpu_ras_block_hw_ops mca_v3_0_mp0_hw_ops = {
.query_ras_error_count = mca_v3_0_mp0_query_ras_error_count,
.query_ras_error_address = NULL,
};
struct amdgpu_mca_ras_block mca_v3_0_mp0_ras = {
.ras_block = {
- .ras_comm = {
- .block = AMDGPU_RAS_BLOCK__MCA,
- .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP0,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .name = "mp0",
- },
.hw_ops = &mca_v3_0_mp0_hw_ops,
.ras_block_match = mca_v3_0_ras_block_match,
},
@@ -77,19 +71,13 @@ static void mca_v3_0_mp1_query_ras_error_count(struct amdgpu_device *adev,
ras_error_status);
}
-const struct amdgpu_ras_block_hw_ops mca_v3_0_mp1_hw_ops = {
+static const struct amdgpu_ras_block_hw_ops mca_v3_0_mp1_hw_ops = {
.query_ras_error_count = mca_v3_0_mp1_query_ras_error_count,
.query_ras_error_address = NULL,
};
struct amdgpu_mca_ras_block mca_v3_0_mp1_ras = {
.ras_block = {
- .ras_comm = {
- .block = AMDGPU_RAS_BLOCK__MCA,
- .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP1,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .name = "mp1",
- },
.hw_ops = &mca_v3_0_mp1_hw_ops,
.ras_block_match = mca_v3_0_ras_block_match,
},
@@ -103,40 +91,14 @@ static void mca_v3_0_mpio_query_ras_error_count(struct amdgpu_device *adev,
ras_error_status);
}
-const struct amdgpu_ras_block_hw_ops mca_v3_0_mpio_hw_ops = {
+static const struct amdgpu_ras_block_hw_ops mca_v3_0_mpio_hw_ops = {
.query_ras_error_count = mca_v3_0_mpio_query_ras_error_count,
.query_ras_error_address = NULL,
};
struct amdgpu_mca_ras_block mca_v3_0_mpio_ras = {
.ras_block = {
- .ras_comm = {
- .block = AMDGPU_RAS_BLOCK__MCA,
- .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MPIO,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .name = "mpio",
- },
.hw_ops = &mca_v3_0_mpio_hw_ops,
.ras_block_match = mca_v3_0_ras_block_match,
},
};
-
-
-static void mca_v3_0_init(struct amdgpu_device *adev)
-{
- struct amdgpu_mca *mca = &adev->mca;
-
- mca->mp0.ras = &mca_v3_0_mp0_ras;
- mca->mp1.ras = &mca_v3_0_mp1_ras;
- mca->mpio.ras = &mca_v3_0_mpio_ras;
- amdgpu_ras_register_ras_block(adev, &mca->mp0.ras->ras_block);
- amdgpu_ras_register_ras_block(adev, &mca->mp1.ras->ras_block);
- amdgpu_ras_register_ras_block(adev, &mca->mpio.ras->ras_block);
- mca->mp0.ras_if = &mca->mp0.ras->ras_block.ras_comm;
- mca->mp1.ras_if = &mca->mp1.ras->ras_block.ras_comm;
- mca->mpio.ras_if = &mca->mpio.ras->ras_block.ras_comm;
-}
-
-const struct amdgpu_mca_funcs mca_v3_0_funcs = {
- .init = mca_v3_0_init,
-}; \ No newline at end of file
diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h
index b899b86194c2..d3eaef0d7f2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h
@@ -21,6 +21,8 @@
#ifndef __MCA_V3_0_H__
#define __MCA_V3_0_H__
-extern const struct amdgpu_mca_funcs mca_v3_0_funcs;
+extern struct amdgpu_mca_ras_block mca_v3_0_mp0_ras;
+extern struct amdgpu_mca_ras_block mca_v3_0_mp1_ras;
+extern struct amdgpu_mca_ras_block mca_v3_0_mpio_ras;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
index 164948c50ac3..17a792616979 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
@@ -517,6 +517,9 @@ static void mmhub_v3_0_init(struct amdgpu_device *adev)
hub->vm_l2_bank_select_reserved_cid2 =
SOC15_REG_OFFSET(MMHUB, 0, regMMVM_L2_BANK_SELECT_RESERVED_CID2);
+ hub->vm_contexts_disable =
+ SOC15_REG_OFFSET(MMHUB, 0, regMMVM_CONTEXTS_DISABLE);
+
hub->vmhub_funcs = &mmhub_v3_0_vmhub_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
index 4b0d563c6522..4ef1fa4603c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
@@ -382,11 +382,6 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3), data);
break;
- case IP_VERSION(7, 5, 1):
- data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2);
- data &= ~RCC_DEV2_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV2_F0_MASK;
- WREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2, data);
- fallthrough;
default:
def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL));
data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL,
@@ -399,6 +394,15 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
break;
}
+ switch (adev->ip_versions[NBIO_HWIP][0]) {
+ case IP_VERSION(7, 3, 0):
+ case IP_VERSION(7, 5, 1):
+ data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2);
+ data &= ~RCC_DEV2_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV2_F0_MASK;
+ WREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2, data);
+ break;
+ }
+
if (amdgpu_sriov_vf(adev))
adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index d972025f0d20..47420b403871 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -280,47 +280,6 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
}
}
-/*
- * Indirect registers accessor
- */
-static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
-{
- unsigned long address, data;
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- return amdgpu_device_indirect_rreg(adev, address, data, reg);
-}
-
-static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
-{
- unsigned long address, data;
-
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- amdgpu_device_indirect_wreg(adev, address, data, reg, v);
-}
-
-static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
-{
- unsigned long address, data;
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- return amdgpu_device_indirect_rreg64(adev, address, data, reg);
-}
-
-static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
-{
- unsigned long address, data;
-
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
-}
-
static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
@@ -444,9 +403,10 @@ static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
*value = 0;
for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
en = &nv_allowed_read_registers[i];
- if (adev->reg_offset[en->hwip][en->inst] &&
- reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
- + en->reg_offset))
+ if (!adev->reg_offset[en->hwip][en->inst])
+ continue;
+ else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ + en->reg_offset))
continue;
*value = nv_get_register_value(adev,
@@ -560,24 +520,9 @@ static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
return 0;
}
-static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
-{
- if (pci_is_root_bus(adev->pdev->bus))
- return;
-
- if (amdgpu_pcie_gen2 == 0)
- return;
-
- if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
- CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
- return;
-
- /* todo */
-}
-
static void nv_program_aspm(struct amdgpu_device *adev)
{
- if (!amdgpu_device_should_use_aspm(adev))
+ if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
return;
if (!(adev->flags & AMD_IS_APU) &&
@@ -607,11 +552,6 @@ void nv_set_virt_ops(struct amdgpu_device *adev)
adev->virt.ops = &xgpu_nv_virt_ops;
}
-static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
-{
- return adev->nbio.funcs->get_rev_id(adev);
-}
-
static bool nv_need_full_reset(struct amdgpu_device *adev)
{
return true;
@@ -737,10 +677,10 @@ static int nv_common_early_init(void *handle)
}
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
- adev->pcie_rreg = &nv_pcie_rreg;
- adev->pcie_wreg = &nv_pcie_wreg;
- adev->pcie_rreg64 = &nv_pcie_rreg64;
- adev->pcie_wreg64 = &nv_pcie_wreg64;
+ adev->pcie_rreg = &amdgpu_device_indirect_rreg;
+ adev->pcie_wreg = &amdgpu_device_indirect_wreg;
+ adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
+ adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
@@ -753,7 +693,7 @@ static int nv_common_early_init(void *handle)
adev->asic_funcs = &nv_asic_funcs;
- adev->rev_id = nv_get_rev_id(adev);
+ adev->rev_id = amdgpu_device_get_rev_id(adev);
adev->external_rev_id = 0xff;
/* TODO: split the GC and PG flags based on the relevant IP version for which
* they are relevant.
@@ -1054,8 +994,8 @@ static int nv_common_late_init(void *handle)
amdgpu_virt_update_sriov_video_codec(adev,
sriov_sc_video_codecs_encode_array,
ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
- sriov_sc_video_codecs_decode_array_vcn1,
- ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1));
+ sriov_sc_video_codecs_decode_array_vcn0,
+ ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn0));
}
}
@@ -1087,8 +1027,6 @@ static int nv_common_hw_init(void *handle)
if (adev->nbio.funcs->apply_l1_link_width_reconfig_wa)
adev->nbio.funcs->apply_l1_link_width_reconfig_wa(adev);
- /* enable pcie gen2/3 link */
- nv_pcie_gen3_enable(adev);
/* enable aspm */
nv_program_aspm(adev);
/* setup nbio registers */
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index d62fcc77af95..caee76ab7110 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -48,6 +48,7 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_11_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_6_sos.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
@@ -100,6 +101,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
return err;
break;
case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 10):
err = psp_init_sos_microcode(psp, ucode_prefix);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
new file mode 100644
index 000000000000..1b04700a4d55
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -0,0 +1,1967 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "amdgpu.h"
+#include "amdgpu_ucode.h"
+#include "amdgpu_trace.h"
+
+#include "sdma/sdma_4_4_2_offset.h"
+#include "sdma/sdma_4_4_2_sh_mask.h"
+
+#include "soc15_common.h"
+#include "soc15.h"
+#include "vega10_sdma_pkt_open.h"
+
+#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
+#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"
+
+#include "amdgpu_ras.h"
+
+MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin");
+
+#define WREG32_SDMA(instance, offset, value) \
+ WREG32(sdma_v4_4_2_get_reg_offset(adev, (instance), (offset)), value)
+#define RREG32_SDMA(instance, offset) \
+ RREG32(sdma_v4_4_2_get_reg_offset(adev, (instance), (offset)))
+
+static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev);
+static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev);
+static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev);
+static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev);
+
+static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev,
+ u32 instance, u32 offset)
+{
+ return (adev->reg_offset[SDMA0_HWIP][instance][0] + offset);
+}
+
+static unsigned sdma_v4_4_2_seq_to_irq_id(int seq_num)
+{
+ switch (seq_num) {
+ case 0:
+ return SOC15_IH_CLIENTID_SDMA0;
+ case 1:
+ return SOC15_IH_CLIENTID_SDMA1;
+ case 2:
+ return SOC15_IH_CLIENTID_SDMA2;
+ case 3:
+ return SOC15_IH_CLIENTID_SDMA3;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sdma_v4_4_2_irq_id_to_seq(unsigned client_id)
+{
+ switch (client_id) {
+ case SOC15_IH_CLIENTID_SDMA0:
+ return 0;
+ case SOC15_IH_CLIENTID_SDMA1:
+ return 1;
+ case SOC15_IH_CLIENTID_SDMA2:
+ return 2;
+ case SOC15_IH_CLIENTID_SDMA3:
+ return 3;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void sdma_v4_4_2_init_golden_registers(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 4, 2):
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * sdma_v4_4_2_init_microcode - load ucode images from disk
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use the firmware interface to load the ucode images into
+ * the driver (not loaded into hw).
+ * Returns 0 on success, error on failure.
+ */
+static int sdma_v4_4_2_init_microcode(struct amdgpu_device *adev)
+{
+ int ret, i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2)) {
+ ret = amdgpu_sdma_init_microcode(adev, 0, true);
+ break;
+ } else {
+ ret = amdgpu_sdma_init_microcode(adev, i, false);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * sdma_v4_4_2_ring_get_rptr - get the current read pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current rptr from the hardware.
+ */
+static uint64_t sdma_v4_4_2_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ u64 *rptr;
+
+ /* XXX check if swapping is necessary on BE */
+ rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);
+
+ DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
+ return ((*rptr) >> 2);
+}
+
+/**
+ * sdma_v4_4_2_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the hardware.
+ */
+static uint64_t sdma_v4_4_2_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u64 wptr;
+
+ if (ring->use_doorbell) {
+ /* XXX check if swapping is necessary on BE */
+ wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
+ DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
+ } else {
+ wptr = RREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR_HI);
+ wptr = wptr << 32;
+ wptr |= RREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR);
+ DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n",
+ ring->me, wptr);
+ }
+
+ return wptr >> 2;
+}
+
+/**
+ * sdma_v4_4_2_ring_set_wptr - commit the write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Write the wptr back to the hardware.
+ */
+static void sdma_v4_4_2_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ DRM_DEBUG("Setting write pointer\n");
+ if (ring->use_doorbell) {
+ u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
+
+ DRM_DEBUG("Using doorbell -- "
+ "wptr_offs == 0x%08x "
+ "lower_32_bits(ring->wptr) << 2 == 0x%08x "
+ "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
+ ring->wptr_offs,
+ lower_32_bits(ring->wptr << 2),
+ upper_32_bits(ring->wptr << 2));
+ /* XXX check if swapping is necessary on BE */
+ WRITE_ONCE(*wb, (ring->wptr << 2));
+ DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
+ ring->doorbell_index, ring->wptr << 2);
+ WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
+ } else {
+ DRM_DEBUG("Not using doorbell -- "
+ "regSDMA%i_GFX_RB_WPTR == 0x%08x "
+ "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
+ ring->me,
+ lower_32_bits(ring->wptr << 2),
+ ring->me,
+ upper_32_bits(ring->wptr << 2));
+ WREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR,
+ lower_32_bits(ring->wptr << 2));
+ WREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR_HI,
+ upper_32_bits(ring->wptr << 2));
+ }
+}
+
+/**
+ * sdma_v4_4_2_page_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the hardware.
+ */
+static uint64_t sdma_v4_4_2_page_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u64 wptr;
+
+ if (ring->use_doorbell) {
+ /* XXX check if swapping is necessary on BE */
+ wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
+ } else {
+ wptr = RREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR_HI);
+ wptr = wptr << 32;
+ wptr |= RREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR);
+ }
+
+ return wptr >> 2;
+}
+
+/**
+ * sdma_v4_4_2_page_ring_set_wptr - commit the write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Write the wptr back to the hardware.
+ */
+static void sdma_v4_4_2_page_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell) {
+ u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
+
+ /* XXX check if swapping is necessary on BE */
+ WRITE_ONCE(*wb, (ring->wptr << 2));
+ WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
+ } else {
+ uint64_t wptr = ring->wptr << 2;
+
+ WREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR,
+ lower_32_bits(wptr));
+ WREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR_HI,
+ upper_32_bits(wptr));
+ }
+}
+
+static void sdma_v4_4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
+ int i;
+
+ for (i = 0; i < count; i++)
+ if (sdma && sdma->burst_nop && (i == 0))
+ amdgpu_ring_write(ring, ring->funcs->nop |
+ SDMA_PKT_NOP_HEADER_COUNT(count - 1));
+ else
+ amdgpu_ring_write(ring, ring->funcs->nop);
+}
+
+/**
+ * sdma_v4_4_2_ring_emit_ib - Schedule an IB on the DMA engine
+ *
+ * @ring: amdgpu ring pointer
+ * @job: job to retrieve vmid from
+ * @ib: IB object to schedule
+ * @flags: unused
+ *
+ * Schedule an IB in the DMA ring.
+ */
+static void sdma_v4_4_2_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
+{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
+ /* IB packet must end on an 8 DW boundary */
+ sdma_v4_4_2_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
+
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
+ SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
+ /* base must be 32 byte aligned */
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, ib->length_dw);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 0);
+
+}
+
+static void sdma_v4_4_2_wait_reg_mem(struct amdgpu_ring *ring,
+ int mem_space, int hdp,
+ uint32_t addr0, uint32_t addr1,
+ uint32_t ref, uint32_t mask,
+ uint32_t inv)
+{
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+ SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(hdp) |
+ SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(mem_space) |
+ SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
+ if (mem_space) {
+ /* memory */
+ amdgpu_ring_write(ring, addr0);
+ amdgpu_ring_write(ring, addr1);
+ } else {
+ /* registers */
+ amdgpu_ring_write(ring, addr0 << 2);
+ amdgpu_ring_write(ring, addr1 << 2);
+ }
+ amdgpu_ring_write(ring, ref); /* reference */
+ amdgpu_ring_write(ring, mask); /* mask */
+ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(inv)); /* retry count, poll interval */
+}
+
+/**
+ * sdma_v4_4_2_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Emit an hdp flush packet on the requested DMA ring.
+ */
+static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 ref_and_mask = 0;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
+
+ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+
+ sdma_v4_4_2_wait_reg_mem(ring, 0, 1,
+ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
+ ref_and_mask, ref_and_mask, 10);
+}
+
+/**
+ * sdma_v4_4_2_ring_emit_fence - emit a fence on the DMA ring
+ *
+ * @ring: amdgpu ring pointer
+ * @addr: address
+ * @seq: sequence number
+ * @flags: fence related flags
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed.
+ */
+static void sdma_v4_4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+ unsigned flags)
+{
+ bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+ /* write the fence */
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
+ /* zero in first two bits */
+ BUG_ON(addr & 0x3);
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, lower_32_bits(seq));
+
+ /* optionally write high bits as well */
+ if (write64bit) {
+ addr += 4;
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
+ /* zero in first two bits */
+ BUG_ON(addr & 0x3);
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(seq));
+ }
+
+ /* generate an interrupt */
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
+}
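
sdma_v4_4_2_ring_emit_fence() above writes a 64-bit sequence number as two 32-bit fence packets when AMDGPU_FENCE_FLAG_64BIT is set: the first packet carries the low word, the second (at addr + 4) the high word. The split itself is the usual lower_32_bits()/upper_32_bits() arithmetic; a tiny stand-alone version with local helper names is shown below.

#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t seq = 0x0000000100000002ULL;

	/* first fence packet carries the low half, second the high half */
	printf("low  word: 0x%08x\n", lower_32(seq));
	printf("high word: 0x%08x\n", upper_32(seq));
	return 0;
}
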
+
+
+/**
+ * sdma_v4_4_2_gfx_stop - stop the gfx async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the gfx async dma ring buffers.
+ */
+static void sdma_v4_4_2_gfx_stop(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
+ u32 rb_cntl, ib_cntl;
+ int i, unset = 0;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sdma[i] = &adev->sdma.instance[i].ring;
+
+ if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ unset = 1;
+ }
+
+ rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
+ ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0);
+ WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
+ }
+}
+
+/**
+ * sdma_v4_4_2_rlc_stop - stop the compute async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the compute async dma queues.
+ */
+static void sdma_v4_4_2_rlc_stop(struct amdgpu_device *adev)
+{
+ /* XXX todo */
+}
+
+/**
+ * sdma_v4_4_2_page_stop - stop the page async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the page async dma ring buffers.
+ */
+static void sdma_v4_4_2_page_stop(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
+ u32 rb_cntl, ib_cntl;
+ int i;
+ bool unset = false;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sdma[i] = &adev->sdma.instance[i].page;
+
+ if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
+ (!unset)) {
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ unset = true;
+ }
+
+ rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL,
+ RB_ENABLE, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
+ ib_cntl = RREG32_SDMA(i, regSDMA_PAGE_IB_CNTL);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL,
+ IB_ENABLE, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl);
+ }
+}
+
+/**
+ * sdma_v4_4_2_ctx_switch_enable - stop the async dma engines context switch
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable/disable the DMA MEs context switch.
+ *
+ * Halt or unhalt the async dma engines context switch.
+ */
+static void sdma_v4_4_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
+{
+ u32 f32_cntl, phase_quantum = 0;
+ int i;
+
+ if (amdgpu_sdma_phase_quantum) {
+ unsigned value = amdgpu_sdma_phase_quantum;
+ unsigned unit = 0;
+
+ while (value > (SDMA_PHASE0_QUANTUM__VALUE_MASK >>
+ SDMA_PHASE0_QUANTUM__VALUE__SHIFT)) {
+ value = (value + 1) >> 1;
+ unit++;
+ }
+ if (unit > (SDMA_PHASE0_QUANTUM__UNIT_MASK >>
+ SDMA_PHASE0_QUANTUM__UNIT__SHIFT)) {
+ value = (SDMA_PHASE0_QUANTUM__VALUE_MASK >>
+ SDMA_PHASE0_QUANTUM__VALUE__SHIFT);
+ unit = (SDMA_PHASE0_QUANTUM__UNIT_MASK >>
+ SDMA_PHASE0_QUANTUM__UNIT__SHIFT);
+ WARN_ONCE(1,
+ "clamping sdma_phase_quantum to %uK clock cycles\n",
+ value << unit);
+ }
+ phase_quantum =
+ value << SDMA_PHASE0_QUANTUM__VALUE__SHIFT |
+ unit << SDMA_PHASE0_QUANTUM__UNIT__SHIFT;
+ }
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ f32_cntl = RREG32_SDMA(i, regSDMA_CNTL);
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_CNTL,
+ AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+ if (enable && amdgpu_sdma_phase_quantum) {
+ WREG32_SDMA(i, regSDMA_PHASE0_QUANTUM, phase_quantum);
+ WREG32_SDMA(i, regSDMA_PHASE1_QUANTUM, phase_quantum);
+ WREG32_SDMA(i, regSDMA_PHASE2_QUANTUM, phase_quantum);
+ }
+ WREG32_SDMA(i, regSDMA_CNTL, f32_cntl);
+
+ /* Extend page fault timeout to avoid interrupt storm */
+ WREG32_SDMA(i, regSDMA_UTCL1_TIMEOUT, 0x00800080);
+ }
+
+}
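
sdma_v4_4_2_ctx_switch_enable() above encodes the amdgpu_sdma_phase_quantum module parameter as a (value, unit) pair: it halves the value and bumps the unit until the value fits the register field, then clamps if even the largest unit cannot represent it. A stand-alone sketch of that encoding follows; the field widths are made up for illustration, the real masks come from the SDMA register headers.

#include <stdio.h>

int main(void)
{
	unsigned int value = 300000;		/* requested quantum in clock cycles */
	unsigned int unit = 0;
	const unsigned int value_max = 0xff;	/* illustrative field width */
	const unsigned int unit_max = 0xf;

	while (value > value_max) {
		value = (value + 1) >> 1;	/* halve, rounding up */
		unit++;
	}
	if (unit > unit_max) {			/* clamp to the largest encodable quantum */
		value = value_max;
		unit = unit_max;
	}
	printf("encoded value=%u unit=%u (~%u cycles)\n",
	       value, unit, value << unit);
	return 0;
}
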
+
+/**
+ * sdma_v4_4_2_enable - stop the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable/disable the DMA MEs.
+ *
+ * Halt or unhalt the async dma engines.
+ */
+static void sdma_v4_4_2_enable(struct amdgpu_device *adev, bool enable)
+{
+ u32 f32_cntl;
+ int i;
+
+ if (!enable) {
+ sdma_v4_4_2_gfx_stop(adev);
+ sdma_v4_4_2_rlc_stop(adev);
+ if (adev->sdma.has_page_queue)
+ sdma_v4_4_2_page_stop(adev);
+ }
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ f32_cntl = RREG32_SDMA(i, regSDMA_F32_CNTL);
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_F32_CNTL, HALT, enable ? 0 : 1);
+ WREG32_SDMA(i, regSDMA_F32_CNTL, f32_cntl);
+ }
+}
+
+/*
+ * sdma_v4_4_2_rb_cntl - get parameters for rb_cntl
+ */
+static uint32_t sdma_v4_4_2_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
+{
+ /* Set ring buffer size in dwords */
+ uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);
+
+ barrier(); /* work around https://bugs.llvm.org/show_bug.cgi?id=42576 */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
+#ifdef __BIG_ENDIAN
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL,
+ RPTR_WRITEBACK_SWAP_ENABLE, 1);
+#endif
+ return rb_cntl;
+}
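
sdma_v4_4_2_rb_cntl() above programs RB_SIZE as order_base_2(ring_size / 4), i.e. the log2 of the ring size expressed in dwords. order_base_2() itself lives in <linux/log2.h>; the stand-alone approximation below only illustrates the arithmetic.

#include <stdio.h>

/* smallest n such that (1u << n) >= x, for x >= 1 */
static unsigned int order_base_2_approx(unsigned int x)
{
	unsigned int n = 0;

	while ((1u << n) < x)
		n++;
	return n;
}

int main(void)
{
	unsigned int ring_size_bytes = 64 * 1024;
	unsigned int rb_bufsz = order_base_2_approx(ring_size_bytes / 4);

	printf("ring of %u bytes -> RB_SIZE order %u (%u dwords)\n",
	       ring_size_bytes, rb_bufsz, 1u << rb_bufsz);
	return 0;
}
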
+
+/**
+ * sdma_v4_4_2_gfx_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ * @i: instance to resume
+ *
+ * Set up the gfx DMA ring buffers and enable them.
+ * Returns 0 for success, error for failure.
+ */
+static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
+{
+ struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
+ u32 rb_cntl, ib_cntl, wptr_poll_cntl;
+ u32 wb_offset;
+ u32 doorbell;
+ u32 doorbell_offset;
+ u64 wptr_gpu_addr;
+
+ wb_offset = (ring->rptr_offs * 4);
+
+ rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL);
+ rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl);
+ WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_ADDR_HI,
+ upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_ADDR_LO,
+ lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
+
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL,
+ RPTR_WRITEBACK_ENABLE, 1);
+
+ WREG32_SDMA(i, regSDMA_GFX_RB_BASE, ring->gpu_addr >> 8);
+ WREG32_SDMA(i, regSDMA_GFX_RB_BASE_HI, ring->gpu_addr >> 40);
+
+ ring->wptr = 0;
+
+ /* before programming wptr to a smaller value, minor_ptr_update must be set first */
+ WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 1);
+
+ doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
+ doorbell_offset = RREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET);
+
+ doorbell = REG_SET_FIELD(doorbell, SDMA_GFX_DOORBELL, ENABLE,
+ ring->use_doorbell);
+ doorbell_offset = REG_SET_FIELD(doorbell_offset,
+ SDMA_GFX_DOORBELL_OFFSET,
+ OFFSET, ring->doorbell_index);
+ WREG32_SDMA(i, regSDMA_GFX_DOORBELL, doorbell);
+ WREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET, doorbell_offset);
+
+ sdma_v4_4_2_ring_set_wptr(ring);
+
+ /* set minor_ptr_update to 0 after wptr is programmed */
+ WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 0);
+
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_ADDR_LO,
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_ADDR_HI,
+ upper_32_bits(wptr_gpu_addr));
+ wptr_poll_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_CNTL);
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA_GFX_RB_WPTR_POLL_CNTL,
+ F32_POLL_ENABLE, amdgpu_sriov_vf(adev)? 1 : 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
+
+ /* enable DMA RB */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 1);
+ WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
+
+ ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 1);
+#ifdef __BIG_ENDIAN
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
+#endif
+ /* enable DMA IBs */
+ WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
+
+ ring->sched.ready = true;
+}
+
+/**
+ * sdma_v4_4_2_page_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ * @i: instance to resume
+ *
+ * Set up the page DMA ring buffers and enable them.
+ */
+static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i)
+{
+ struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
+ u32 rb_cntl, ib_cntl, wptr_poll_cntl;
+ u32 wb_offset;
+ u32 doorbell;
+ u32 doorbell_offset;
+ u64 wptr_gpu_addr;
+
+ wb_offset = (ring->rptr_offs * 4);
+
+ rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL);
+ rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_ADDR_HI,
+ upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_ADDR_LO,
+ lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
+
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL,
+ RPTR_WRITEBACK_ENABLE, 1);
+
+ WREG32_SDMA(i, regSDMA_PAGE_RB_BASE, ring->gpu_addr >> 8);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);
+
+ ring->wptr = 0;
+
+ /* before programming wptr to a smaller value, need to set minor_ptr_update first */
+ WREG32_SDMA(i, regSDMA_PAGE_MINOR_PTR_UPDATE, 1);
+
+ doorbell = RREG32_SDMA(i, regSDMA_PAGE_DOORBELL);
+ doorbell_offset = RREG32_SDMA(i, regSDMA_PAGE_DOORBELL_OFFSET);
+
+ doorbell = REG_SET_FIELD(doorbell, SDMA_PAGE_DOORBELL, ENABLE,
+ ring->use_doorbell);
+ doorbell_offset = REG_SET_FIELD(doorbell_offset,
+ SDMA_PAGE_DOORBELL_OFFSET,
+ OFFSET, ring->doorbell_index);
+ WREG32_SDMA(i, regSDMA_PAGE_DOORBELL, doorbell);
+ WREG32_SDMA(i, regSDMA_PAGE_DOORBELL_OFFSET, doorbell_offset);
+
+ /* paging queue doorbell range is setup at sdma_v4_4_2_gfx_resume */
+ sdma_v4_4_2_page_ring_set_wptr(ring);
+
+ /* set minor_ptr_update to 0 after wptr programmed */
+ WREG32_SDMA(i, regSDMA_PAGE_MINOR_PTR_UPDATE, 0);
+
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_ADDR_LO,
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_ADDR_HI,
+ upper_32_bits(wptr_gpu_addr));
+ wptr_poll_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_CNTL);
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA_PAGE_RB_WPTR_POLL_CNTL,
+ F32_POLL_ENABLE, amdgpu_sriov_vf(adev)? 1 : 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
+
+ /* enable DMA RB */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL, RB_ENABLE, 1);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
+
+ ib_cntl = RREG32_SDMA(i, regSDMA_PAGE_IB_CNTL);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL, IB_ENABLE, 1);
+#ifdef __BIG_ENDIAN
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1);
+#endif
+ /* enable DMA IBs */
+ WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl);
+
+ ring->sched.ready = true;
+}
+
+static void sdma_v4_4_2_init_pg(struct amdgpu_device *adev)
+{
+
+}
+
+/**
+ * sdma_v4_4_2_rlc_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the compute DMA queues and enable them.
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v4_4_2_rlc_resume(struct amdgpu_device *adev)
+{
+ sdma_v4_4_2_init_pg(adev);
+
+ return 0;
+}
+
+/**
+ * sdma_v4_4_2_load_microcode - load the sDMA ME ucode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Loads the sDMA microcode for each instance.
+ * Returns 0 for success, -EINVAL if the ucode is not available.
+ */
+static int sdma_v4_4_2_load_microcode(struct amdgpu_device *adev)
+{
+ const struct sdma_firmware_header_v1_0 *hdr;
+ const __le32 *fw_data;
+ u32 fw_size;
+ int i, j;
+
+ /* halt the MEs */
+ sdma_v4_4_2_enable(adev, false);
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (!adev->sdma.instance[i].fw)
+ return -EINVAL;
+
+ hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
+ amdgpu_ucode_print_sdma_hdr(&hdr->header);
+ fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+
+ fw_data = (const __le32 *)
+ (adev->sdma.instance[i].fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+ WREG32_SDMA(i, regSDMA_UCODE_ADDR, 0);
+
+ for (j = 0; j < fw_size; j++)
+ WREG32_SDMA(i, regSDMA_UCODE_DATA,
+ le32_to_cpup(fw_data++));
+
+ WREG32_SDMA(i, regSDMA_UCODE_ADDR,
+ adev->sdma.instance[i].fw_version);
+ }
+
+ return 0;
+}
+
+/**
+ * sdma_v4_4_2_start - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the DMA engines and enable them.
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v4_4_2_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int i, r = 0;
+
+ if (amdgpu_sriov_vf(adev)) {
+ sdma_v4_4_2_ctx_switch_enable(adev, false);
+ sdma_v4_4_2_enable(adev, false);
+ } else {
+ /* bypass sdma microcode loading on Gopher */
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP &&
+ !(adev->pdev->device == 0x49) && !(adev->pdev->device == 0x50)) {
+ r = sdma_v4_4_2_load_microcode(adev);
+ if (r)
+ return r;
+ }
+
+ /* unhalt the MEs */
+ sdma_v4_4_2_enable(adev, true);
+ /* enable sdma ring preemption */
+ sdma_v4_4_2_ctx_switch_enable(adev, true);
+ }
+
+ /* start the gfx rings and rlc compute queues */
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ uint32_t temp;
+
+ WREG32_SDMA(i, regSDMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+ sdma_v4_4_2_gfx_resume(adev, i);
+ if (adev->sdma.has_page_queue)
+ sdma_v4_4_2_page_resume(adev, i);
+
+ /* set utc l1 enable flag always to 1 */
+ temp = RREG32_SDMA(i, regSDMA_CNTL);
+ temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1);
+ WREG32_SDMA(i, regSDMA_CNTL, temp);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ ring = &adev->sdma.instance[i].ring;
+ adev->nbio.funcs->sdma_doorbell_range(adev, i,
+ ring->use_doorbell, ring->doorbell_index,
+ adev->doorbell_index.sdma_doorbell_range);
+
+ /* unhalt engine */
+ temp = RREG32_SDMA(i, regSDMA_F32_CNTL);
+ temp = REG_SET_FIELD(temp, SDMA_F32_CNTL, HALT, 0);
+ WREG32_SDMA(i, regSDMA_F32_CNTL, temp);
+ }
+ }
+
+ if (amdgpu_sriov_vf(adev)) {
+ sdma_v4_4_2_ctx_switch_enable(adev, true);
+ sdma_v4_4_2_enable(adev, true);
+ } else {
+ r = sdma_v4_4_2_rlc_resume(adev);
+ if (r)
+ return r;
+ }
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+
+ if (adev->sdma.has_page_queue) {
+ struct amdgpu_ring *page = &adev->sdma.instance[i].page;
+
+ r = amdgpu_ring_test_helper(page);
+ if (r)
+ return r;
+
+ if (adev->mman.buffer_funcs_ring == page)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ }
+
+ if (adev->mman.buffer_funcs_ring == ring)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ }
+
+ return r;
+}
+
+/**
+ * sdma_v4_4_2_ring_test_ring - simple async dma engine test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a value to memory.
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v4_4_2_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ unsigned i;
+ unsigned index;
+ int r;
+ u32 tmp;
+ u64 gpu_addr;
+
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
+ return r;
+
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ tmp = 0xCAFEDEAD;
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
+ r = amdgpu_ring_alloc(ring, 5);
+ if (r)
+ goto error_free_wb;
+
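+ /* emit a 5-dword WRITE_LINEAR packet: header, dst addr lo/hi, dword count (zero-based), one payload dword */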
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
+ amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
+ amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = le32_to_cpu(adev->wb.wb[index]);
+ if (tmp == 0xDEADBEEF)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+error_free_wb:
+ amdgpu_device_wb_free(adev, index);
+ return r;
+}
+
+/**
+ * sdma_v4_4_2_ring_test_ib - test an IB on the DMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ *
+ * Test a simple IB in the DMA ring.
+ * Returns 0 on success, error on failure.
+ */
+static int sdma_v4_4_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ib ib;
+ struct dma_fence *f = NULL;
+ unsigned index;
+ long r;
+ u32 tmp = 0;
+ u64 gpu_addr;
+
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
+ return r;
+
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ tmp = 0xCAFEDEAD;
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+ memset(&ib, 0, sizeof(ib));
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r)
+ goto err0;
+
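+ /* build the same WRITE_LINEAR packet as the ring test, padded with NOPs to 8 dwords */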
+ ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
+ ib.ptr[1] = lower_32_bits(gpu_addr);
+ ib.ptr[2] = upper_32_bits(gpu_addr);
+ ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
+ ib.ptr[4] = 0xDEADBEEF;
+ ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
+ ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
+ ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
+ ib.length_dw = 8;
+
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+ if (r)
+ goto err1;
+
+ r = dma_fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ r = -ETIMEDOUT;
+ goto err1;
+ } else if (r < 0) {
+ goto err1;
+ }
+ tmp = le32_to_cpu(adev->wb.wb[index]);
+ if (tmp == 0xDEADBEEF)
+ r = 0;
+ else
+ r = -EINVAL;
+
+err1:
+ amdgpu_ib_free(adev, &ib, NULL);
+ dma_fence_put(f);
+err0:
+ amdgpu_device_wb_free(adev, index);
+ return r;
+}
+
+
+/**
+ * sdma_v4_4_2_vm_copy_pte - update PTEs by copying them from the GART
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using sDMA.
+ */
+static void sdma_v4_4_2_vm_copy_pte(struct amdgpu_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count)
+{
+ unsigned bytes = count * 8;
+
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ ib->ptr[ib->length_dw++] = bytes - 1;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+
+}
+
+/**
+ * sdma_v4_4_2_vm_write_pte - update PTEs by writing them manually
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @value: value to write into the page table entries
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ *
+ * Update PTEs by writing them manually using sDMA.
+ */
+static void sdma_v4_4_2_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+ uint64_t value, unsigned count,
+ uint32_t incr)
+{
+ unsigned ndw = count * 2;
+
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = ndw - 1;
+ for (; ndw > 0; ndw -= 2) {
+ ib->ptr[ib->length_dw++] = lower_32_bits(value);
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ value += incr;
+ }
+}
+
+/**
+ * sdma_v4_4_2_vm_set_pte_pde - update the page tables using sDMA
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA.
+ */
+static void sdma_v4_4_2_vm_set_pte_pde(struct amdgpu_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint64_t flags)
+{
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
+ ib->ptr[ib->length_dw++] = upper_32_bits(flags);
+ ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
+}
+
+/**
+ * sdma_v4_4_2_ring_pad_ib - pad the IB to the required number of dw
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ib: indirect buffer to fill with padding
+ */
+static void sdma_v4_4_2_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
+{
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
+ u32 pad_count;
+ int i;
+
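+ /* number of NOP dwords needed to round length_dw up to a multiple of 8 */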
+ pad_count = (-ib->length_dw) & 7;
+ for (i = 0; i < pad_count; i++)
+ if (sdma && sdma->burst_nop && (i == 0))
+ ib->ptr[ib->length_dw++] =
+ SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
+ SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
+ else
+ ib->ptr[ib->length_dw++] =
+ SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
+}
+
+
+/**
+ * sdma_v4_4_2_ring_emit_pipeline_sync - sync the pipeline
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Make sure all previous operations are completed.
+ */
+static void sdma_v4_4_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ /* wait for idle */
+ sdma_v4_4_2_wait_reg_mem(ring, 1, 0,
+ addr & 0xfffffffc,
+ upper_32_bits(addr) & 0xffffffff,
+ seq, 0xffffffff, 4);
+}
+
+
+/**
+ * sdma_v4_4_2_ring_emit_vm_flush - vm flush using sDMA
+ *
+ * @ring: amdgpu_ring pointer
+ * @vmid: vmid number to use
+ * @pd_addr: page table base address
+ *
+ * Update the page table base and flush the VM TLB
+ * using sDMA.
+ */
+static void sdma_v4_4_2_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vmid, uint64_t pd_addr)
+{
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+}
+
+static void sdma_v4_4_2_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
+{
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+ SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+ amdgpu_ring_write(ring, reg);
+ amdgpu_ring_write(ring, val);
+}
+
+static void sdma_v4_4_2_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
+{
+ sdma_v4_4_2_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
+}
+
+static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 4, 2):
+ return false;
+ default:
+ return false;
+ }
+}
+
+static int sdma_v4_4_2_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = sdma_v4_4_2_init_microcode(adev);
+ if (r) {
+ DRM_ERROR("Failed to load sdma firmware!\n");
+ return r;
+ }
+
+ /* TODO: Page queue breaks driver reload under SRIOV */
+ if (sdma_v4_4_2_fw_support_paging_queue(adev))
+ adev->sdma.has_page_queue = true;
+
+ sdma_v4_4_2_set_ring_funcs(adev);
+ sdma_v4_4_2_set_buffer_funcs(adev);
+ sdma_v4_4_2_set_vm_pte_funcs(adev);
+ sdma_v4_4_2_set_irq_funcs(adev);
+
+ return 0;
+}
+
+#if 0
+static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry);
+#endif
+
+static int sdma_v4_4_2_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+#if 0
+ struct ras_ih_if ih_info = {
+ .cb = sdma_v4_4_2_process_ras_data_cb,
+ };
+#endif
+ if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
+ if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops &&
+ adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count)
+ adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count(adev);
+ }
+
+ return 0;
+}
+
+static int sdma_v4_4_2_sw_init(void *handle)
+{
+ struct amdgpu_ring *ring;
+ int r, i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* SDMA trap event */
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_TRAP,
+ &adev->sdma.trap_irq);
+ if (r)
+ return r;
+ }
+
+ /* SDMA SRAM ECC event */
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
+ &adev->sdma.ecc_irq);
+ if (r)
+ return r;
+ }
+
+ /* SDMA VM_HOLE/DOORBELL_INV/POLL_TIMEOUT/SRBM_WRITE_PROTECTION event */
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_VM_HOLE,
+ &adev->sdma.vm_hole_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID,
+ &adev->sdma.doorbell_invalid_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT,
+ &adev->sdma.pool_timeout_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_SRBMWRITE,
+ &adev->sdma.srbm_write_irq);
+ if (r)
+ return r;
+ }
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+ ring->ring_obj = NULL;
+ ring->use_doorbell = true;
+
+ DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
+ ring->use_doorbell?"true":"false");
+
+ /* doorbell size is 2 dwords, get DWORD offset */
+ ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
+
+ sprintf(ring->name, "sdma%d", i);
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
+ if (r)
+ return r;
+
+ if (adev->sdma.has_page_queue) {
+ ring = &adev->sdma.instance[i].page;
+ ring->ring_obj = NULL;
+ ring->use_doorbell = true;
+
+ /* paging queue uses the same doorbell index/routing as the gfx queue
+ * with a 0x400 (4096 dwords) offset on the second doorbell page
+ */
+ ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
+ ring->doorbell_index += 0x400;
+
+ sprintf(ring->name, "page%d", i);
+ r = amdgpu_ring_init(adev, ring, 1024,
+ &adev->sdma.trap_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
+ if (r)
+ return r;
+ }
+ }
+
+ return r;
+}
+
+static int sdma_v4_4_2_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ if (adev->sdma.has_page_queue)
+ amdgpu_ring_fini(&adev->sdma.instance[i].page);
+ }
+
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2))
+ amdgpu_sdma_destroy_inst_ctx(adev, true);
+ else
+ amdgpu_sdma_destroy_inst_ctx(adev, false);
+
+ return 0;
+}
+
+static int sdma_v4_4_2_hw_init(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->flags & AMD_IS_APU)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
+
+ if (!amdgpu_sriov_vf(adev))
+ sdma_v4_4_2_init_golden_registers(adev);
+
+ r = sdma_v4_4_2_start(adev);
+
+ return r;
+}
+
+static int sdma_v4_4_2_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ }
+
+ sdma_v4_4_2_ctx_switch_enable(adev, false);
+ sdma_v4_4_2_enable(adev, false);
+
+ return 0;
+}
+
+static int sdma_v4_4_2_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return sdma_v4_4_2_hw_fini(adev);
+}
+
+static int sdma_v4_4_2_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return sdma_v4_4_2_hw_init(adev);
+}
+
+static bool sdma_v4_4_2_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ u32 tmp = RREG32_SDMA(i, regSDMA_STATUS_REG);
+
+ if (!(tmp & SDMA_STATUS_REG__IDLE_MASK))
+ return false;
+ }
+
+ return true;
+}
+
+static int sdma_v4_4_2_wait_for_idle(void *handle)
+{
+ unsigned i, j;
+ u32 sdma[AMDGPU_MAX_SDMA_INSTANCES];
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ for (j = 0; j < adev->sdma.num_instances; j++) {
+ sdma[j] = RREG32_SDMA(j, regSDMA_STATUS_REG);
+ if (!(sdma[j] & SDMA_STATUS_REG__IDLE_MASK))
+ break;
+ }
+ if (j == adev->sdma.num_instances)
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static int sdma_v4_4_2_soft_reset(void *handle)
+{
+ /* todo */
+
+ return 0;
+}
+
+static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 sdma_cntl;
+
+ sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
+ sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, TRAP_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
+
+ return 0;
+}
+
+static int sdma_v4_4_2_process_trap_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t instance;
+
+ DRM_DEBUG("IH: SDMA trap\n");
+ instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
+ switch (entry->ring_id) {
+ case 0:
+ amdgpu_fence_process(&adev->sdma.instance[instance].ring);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+#if 0
+static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry)
+{
+ int instance;
+
+ /* When "Full RAS" is enabled, the per-IP interrupt sources should
+ * be disabled and the driver should only look for the aggregated
+ * interrupt via sync flood
+ */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ goto out;
+
+ instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
+ if (instance < 0)
+ goto out;
+
+ amdgpu_sdma_process_ras_data_cb(adev, err_data, entry);
+
+out:
+ return AMDGPU_RAS_SUCCESS;
+}
+#endif
+
+static int sdma_v4_4_2_process_illegal_inst_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ int instance;
+
+ DRM_ERROR("Illegal instruction in SDMA command stream\n");
+
+ instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
+ if (instance < 0)
+ return 0;
+
+ switch (entry->ring_id) {
+ case 0:
+ drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
+ break;
+ }
+ return 0;
+}
+
+static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 sdma_edc_config;
+
+ sdma_edc_config = RREG32_SDMA(type, regCC_SDMA_EDC_CONFIG);
+ /*
+ * FIXME: This was inherited from Aldebaran, but this field is not
+ * defined in the regspec of either Aldebaran or SDMA 4.4.2
+ */
+ sdma_edc_config |= (state == AMDGPU_IRQ_STATE_ENABLE) ? (1 << 2) : 0;
+ WREG32_SDMA(type, regCC_SDMA_EDC_CONFIG, sdma_edc_config);
+
+ return 0;
+}
+
+static int sdma_v4_4_2_print_iv_entry(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ int instance;
+ struct amdgpu_task_info task_info;
+ u64 addr;
+
+ instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
+ if (instance < 0 || instance >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance invalid %d\n", instance);
+ return -EINVAL;
+ }
+
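+ /* the fault address is split across the IV entry: src_data[0] holds
+ * bits [43:12], the low nibble of src_data[1] holds bits [47:44]
+ */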
+ addr = (u64)entry->src_data[0] << 12;
+ addr |= ((u64)entry->src_data[1] & 0xf) << 44;
+
+ memset(&task_info, 0, sizeof(struct amdgpu_task_info));
+ amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
+
+ dev_dbg_ratelimited(adev->dev,
+ "[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u "
+ "pasid:%u, for process %s pid %d thread %s pid %d\n",
+ instance, addr, entry->src_id, entry->ring_id, entry->vmid,
+ entry->pasid, task_info.process_name, task_info.tgid,
+ task_info.task_name, task_info.pid);
+ return 0;
+}
+
+static int sdma_v4_4_2_process_vm_hole_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ dev_dbg_ratelimited(adev->dev, "MC or SEM address in VM hole\n");
+ sdma_v4_4_2_print_iv_entry(adev, entry);
+ return 0;
+}
+
+static int sdma_v4_4_2_process_doorbell_invalid_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+
+ dev_dbg_ratelimited(adev->dev, "SDMA received a doorbell from BIF with byte_enable !=0xff\n");
+ sdma_v4_4_2_print_iv_entry(adev, entry);
+ return 0;
+}
+
+static int sdma_v4_4_2_process_pool_timeout_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ dev_dbg_ratelimited(adev->dev,
+ "Polling register/memory timeout executing POLL_REG/MEM with finite timer\n");
+ sdma_v4_4_2_print_iv_entry(adev, entry);
+ return 0;
+}
+
+static int sdma_v4_4_2_process_srbm_write_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ dev_dbg_ratelimited(adev->dev,
+ "SDMA gets an Register Write SRBM_WRITE command in non-privilege command buffer\n");
+ sdma_v4_4_2_print_iv_entry(adev, entry);
+ return 0;
+}
+
+static void sdma_v4_4_2_update_medium_grain_clock_gating(
+ struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t data, def;
+ int i;
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL);
+ data &= ~(SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK);
+ if (def != data)
+ WREG32_SDMA(i, regSDMA_CLK_CTRL, data);
+ }
+ } else {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL);
+ data |= (SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK);
+ if (def != data)
+ WREG32_SDMA(i, regSDMA_CLK_CTRL, data);
+ }
+ }
+}
+
+
+static void sdma_v4_4_2_update_medium_grain_light_sleep(
+ struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t data, def;
+ int i;
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ /* 1-not override: enable sdma mem light sleep */
+ def = data = RREG32_SDMA(i, regSDMA_POWER_CNTL);
+ data |= SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
+ if (def != data)
+ WREG32_SDMA(i, regSDMA_POWER_CNTL, data);
+ }
+ } else {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ /* 0-override: disable sdma mem light sleep */
+ def = data = RREG32_SDMA(i, regSDMA_POWER_CNTL);
+ data &= ~SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
+ if (def != data)
+ WREG32_SDMA(i, regSDMA_POWER_CNTL, data);
+ }
+ }
+}
+
+static int sdma_v4_4_2_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ sdma_v4_4_2_update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE);
+ sdma_v4_4_2_update_medium_grain_light_sleep(adev,
+ state == AMD_CG_STATE_GATE);
+ return 0;
+}
+
+static int sdma_v4_4_2_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+static void sdma_v4_4_2_get_clockgating_state(void *handle, u64 *flags)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int data;
+
+ if (amdgpu_sriov_vf(adev))
+ *flags = 0;
+
+ /* AMD_CG_SUPPORT_SDMA_MGCG */
+ data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, regSDMA_CLK_CTRL));
+ if (!(data & SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK))
+ *flags |= AMD_CG_SUPPORT_SDMA_MGCG;
+
+ /* AMD_CG_SUPPORT_SDMA_LS */
+ data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, regSDMA_POWER_CNTL));
+ if (data & SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
+ *flags |= AMD_CG_SUPPORT_SDMA_LS;
+}
+
+const struct amd_ip_funcs sdma_v4_4_2_ip_funcs = {
+ .name = "sdma_v4_4_2",
+ .early_init = sdma_v4_4_2_early_init,
+ .late_init = sdma_v4_4_2_late_init,
+ .sw_init = sdma_v4_4_2_sw_init,
+ .sw_fini = sdma_v4_4_2_sw_fini,
+ .hw_init = sdma_v4_4_2_hw_init,
+ .hw_fini = sdma_v4_4_2_hw_fini,
+ .suspend = sdma_v4_4_2_suspend,
+ .resume = sdma_v4_4_2_resume,
+ .is_idle = sdma_v4_4_2_is_idle,
+ .wait_for_idle = sdma_v4_4_2_wait_for_idle,
+ .soft_reset = sdma_v4_4_2_soft_reset,
+ .set_clockgating_state = sdma_v4_4_2_set_clockgating_state,
+ .set_powergating_state = sdma_v4_4_2_set_powergating_state,
+ .get_clockgating_state = sdma_v4_4_2_get_clockgating_state,
+};
+
+static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
+ .support_64bit_ptrs = true,
+ .vmhub = AMDGPU_MMHUB_0,
+ .get_rptr = sdma_v4_4_2_ring_get_rptr,
+ .get_wptr = sdma_v4_4_2_ring_get_wptr,
+ .set_wptr = sdma_v4_4_2_ring_set_wptr,
+ .emit_frame_size =
+ 6 + /* sdma_v4_4_2_ring_emit_hdp_flush */
+ 3 + /* hdp invalidate */
+ 6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */
+ /* sdma_v4_4_2_ring_emit_vm_flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+ 10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */
+ .emit_ib = sdma_v4_4_2_ring_emit_ib,
+ .emit_fence = sdma_v4_4_2_ring_emit_fence,
+ .emit_pipeline_sync = sdma_v4_4_2_ring_emit_pipeline_sync,
+ .emit_vm_flush = sdma_v4_4_2_ring_emit_vm_flush,
+ .emit_hdp_flush = sdma_v4_4_2_ring_emit_hdp_flush,
+ .test_ring = sdma_v4_4_2_ring_test_ring,
+ .test_ib = sdma_v4_4_2_ring_test_ib,
+ .insert_nop = sdma_v4_4_2_ring_insert_nop,
+ .pad_ib = sdma_v4_4_2_ring_pad_ib,
+ .emit_wreg = sdma_v4_4_2_ring_emit_wreg,
+ .emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
+ .support_64bit_ptrs = true,
+ .vmhub = AMDGPU_MMHUB_0,
+ .get_rptr = sdma_v4_4_2_ring_get_rptr,
+ .get_wptr = sdma_v4_4_2_page_ring_get_wptr,
+ .set_wptr = sdma_v4_4_2_page_ring_set_wptr,
+ .emit_frame_size =
+ 6 + /* sdma_v4_4_2_ring_emit_hdp_flush */
+ 3 + /* hdp invalidate */
+ 6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */
+ /* sdma_v4_4_2_ring_emit_vm_flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+ 10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */
+ .emit_ib = sdma_v4_4_2_ring_emit_ib,
+ .emit_fence = sdma_v4_4_2_ring_emit_fence,
+ .emit_pipeline_sync = sdma_v4_4_2_ring_emit_pipeline_sync,
+ .emit_vm_flush = sdma_v4_4_2_ring_emit_vm_flush,
+ .emit_hdp_flush = sdma_v4_4_2_ring_emit_hdp_flush,
+ .test_ring = sdma_v4_4_2_ring_test_ring,
+ .test_ib = sdma_v4_4_2_ring_test_ib,
+ .insert_nop = sdma_v4_4_2_ring_insert_nop,
+ .pad_ib = sdma_v4_4_2_ring_pad_ib,
+ .emit_wreg = sdma_v4_4_2_ring_emit_wreg,
+ .emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ adev->sdma.instance[i].ring.funcs = &sdma_v4_4_2_ring_funcs;
+ adev->sdma.instance[i].ring.me = i;
+ if (adev->sdma.has_page_queue) {
+ adev->sdma.instance[i].page.funcs =
+ &sdma_v4_4_2_page_ring_funcs;
+ adev->sdma.instance[i].page.me = i;
+ }
+ }
+}
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_trap_irq_funcs = {
+ .set = sdma_v4_4_2_set_trap_irq_state,
+ .process = sdma_v4_4_2_process_trap_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_illegal_inst_irq_funcs = {
+ .process = sdma_v4_4_2_process_illegal_inst_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_ecc_irq_funcs = {
+ .set = sdma_v4_4_2_set_ecc_irq_state,
+ .process = amdgpu_sdma_process_ecc_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_vm_hole_irq_funcs = {
+ .process = sdma_v4_4_2_process_vm_hole_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_doorbell_invalid_irq_funcs = {
+ .process = sdma_v4_4_2_process_doorbell_invalid_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_pool_timeout_irq_funcs = {
+ .process = sdma_v4_4_2_process_pool_timeout_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_srbm_write_irq_funcs = {
+ .process = sdma_v4_4_2_process_srbm_write_irq,
+};
+
+static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->sdma.trap_irq.num_types = adev->sdma.num_instances;
+ adev->sdma.ecc_irq.num_types = adev->sdma.num_instances;
+ adev->sdma.vm_hole_irq.num_types = adev->sdma.num_instances;
+ adev->sdma.doorbell_invalid_irq.num_types = adev->sdma.num_instances;
+ adev->sdma.pool_timeout_irq.num_types = adev->sdma.num_instances;
+ adev->sdma.srbm_write_irq.num_types = adev->sdma.num_instances;
+
+ adev->sdma.trap_irq.funcs = &sdma_v4_4_2_trap_irq_funcs;
+ adev->sdma.illegal_inst_irq.funcs = &sdma_v4_4_2_illegal_inst_irq_funcs;
+ adev->sdma.ecc_irq.funcs = &sdma_v4_4_2_ecc_irq_funcs;
+ adev->sdma.vm_hole_irq.funcs = &sdma_v4_4_2_vm_hole_irq_funcs;
+ adev->sdma.doorbell_invalid_irq.funcs = &sdma_v4_4_2_doorbell_invalid_irq_funcs;
+ adev->sdma.pool_timeout_irq.funcs = &sdma_v4_4_2_pool_timeout_irq_funcs;
+ adev->sdma.srbm_write_irq.funcs = &sdma_v4_4_2_srbm_write_irq_funcs;
+}
+
+/**
+ * sdma_v4_4_2_emit_copy_buffer - copy buffer using the sDMA engine
+ *
+ * @ib: indirect buffer to copy to
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ * @tmz: if a secure copy should be used
+ *
+ * Copy GPU buffers using the DMA engine.
+ * Used by the amdgpu ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+static void sdma_v4_4_2_emit_copy_buffer(struct amdgpu_ib *ib,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ uint32_t byte_count,
+ bool tmz)
+{
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
+ SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
+ ib->ptr[ib->length_dw++] = byte_count - 1;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
+ ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+ ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+}
+
+/**
+ * sdma_v4_4_2_emit_fill_buffer - fill buffer using the sDMA engine
+ *
+ * @ib: indirect buffer to copy to
+ * @src_data: value to write to buffer
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Fill GPU buffers using the DMA engine.
+ */
+static void sdma_v4_4_2_emit_fill_buffer(struct amdgpu_ib *ib,
+ uint32_t src_data,
+ uint64_t dst_offset,
+ uint32_t byte_count)
+{
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
+ ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+ ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+ ib->ptr[ib->length_dw++] = src_data;
+ ib->ptr[ib->length_dw++] = byte_count - 1;
+}
+
+static const struct amdgpu_buffer_funcs sdma_v4_4_2_buffer_funcs = {
+ .copy_max_bytes = 0x400000,
+ .copy_num_dw = 7,
+ .emit_copy_buffer = sdma_v4_4_2_emit_copy_buffer,
+
+ .fill_max_bytes = 0x400000,
+ .fill_num_dw = 5,
+ .emit_fill_buffer = sdma_v4_4_2_emit_fill_buffer,
+};
+
+static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev)
+{
+ adev->mman.buffer_funcs = &sdma_v4_4_2_buffer_funcs;
+ if (adev->sdma.has_page_queue)
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
+ else
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+}
+
+static const struct amdgpu_vm_pte_funcs sdma_v4_4_2_vm_pte_funcs = {
+ .copy_pte_num_dw = 7,
+ .copy_pte = sdma_v4_4_2_vm_copy_pte,
+
+ .write_pte = sdma_v4_4_2_vm_write_pte,
+ .set_pte_pde = sdma_v4_4_2_vm_set_pte_pde,
+};
+
+static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev)
+{
+ struct drm_gpu_scheduler *sched;
+ unsigned i;
+
+ adev->vm_manager.vm_pte_funcs = &sdma_v4_4_2_vm_pte_funcs;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (adev->sdma.has_page_queue)
+ sched = &adev->sdma.instance[i].page.sched;
+ else
+ sched = &adev->sdma.instance[i].ring.sched;
+ adev->vm_manager.vm_pte_scheds[i] = sched;
+ }
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
+}
+
+const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 4,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &sdma_v4_4_2_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h
index c26e7258a91c..4814e8a074d6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-15 Advanced Micro Devices, Inc.
+ * Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,9 +19,12 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: AMD
- *
*/
-// TODO - remove this file after external build dependencies is resolved.
-/* NOTE: This file is pending to be removed, do not add new code to this file */
\ No newline at end of file
+#ifndef __SDMA_V4_4_2_H__
+#define __SDMA_V4_4_2_H__
+
+extern const struct amd_ip_funcs sdma_v4_4_2_ip_funcs;
+extern const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 7cd17dda32ce..7d04c39332ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -191,47 +191,6 @@ static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
}
}
-/*
- * Indirect registers accessor
- */
-static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
-{
- unsigned long address, data;
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- return amdgpu_device_indirect_rreg(adev, address, data, reg);
-}
-
-static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
-{
- unsigned long address, data;
-
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- amdgpu_device_indirect_wreg(adev, address, data, reg, v);
-}
-
-static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
-{
- unsigned long address, data;
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- return amdgpu_device_indirect_rreg64(adev, address, data, reg);
-}
-
-static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
-{
- unsigned long address, data;
-
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
-}
-
static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
@@ -439,8 +398,9 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
*value = 0;
for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
en = &soc15_allowed_read_registers[i];
- if (adev->reg_offset[en->hwip][en->inst] &&
- reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ if (!adev->reg_offset[en->hwip][en->inst])
+ continue;
+ else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ en->reg_offset))
continue;
@@ -650,24 +610,6 @@ static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk
return 0;
}
-static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
-{
- if (pci_is_root_bus(adev->pdev->bus))
- return;
-
- if (amdgpu_pcie_gen2 == 0)
- return;
-
- if (adev->flags & AMD_IS_APU)
- return;
-
- if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
- CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
- return;
-
- /* todo */
-}
-
static void soc15_program_aspm(struct amdgpu_device *adev)
{
if (!amdgpu_device_should_use_aspm(adev))
@@ -694,11 +636,6 @@ const struct amdgpu_ip_block_version vega10_common_ip_block =
.funcs = &soc15_common_ip_funcs,
};
-static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
-{
- return adev->nbio.funcs->get_rev_id(adev);
-}
-
static void soc15_reg_base_init(struct amdgpu_device *adev)
{
/* Set IP register base before any HW register access */
@@ -935,10 +872,10 @@ static int soc15_common_early_init(void *handle)
}
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
- adev->pcie_rreg = &soc15_pcie_rreg;
- adev->pcie_wreg = &soc15_pcie_wreg;
- adev->pcie_rreg64 = &soc15_pcie_rreg64;
- adev->pcie_wreg64 = &soc15_pcie_wreg64;
+ adev->pcie_rreg = &amdgpu_device_indirect_rreg;
+ adev->pcie_wreg = &amdgpu_device_indirect_wreg;
+ adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
+ adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
adev->didt_rreg = &soc15_didt_rreg;
@@ -948,7 +885,7 @@ static int soc15_common_early_init(void *handle)
adev->se_cac_rreg = &soc15_se_cac_rreg;
adev->se_cac_wreg = &soc15_se_cac_wreg;
- adev->rev_id = soc15_get_rev_id(adev);
+ adev->rev_id = amdgpu_device_get_rev_id(adev);
adev->external_rev_id = 0xFF;
/* TODO: split the GC and PG flags based on the relevant IP version for which
* they are relevant.
@@ -1229,8 +1166,6 @@ static int soc15_common_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* enable pcie gen2/3 link */
- soc15_pcie_gen3_enable(adev);
/* enable aspm */
soc15_program_aspm(adev);
/* setup nbio registers */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 620f7409825d..67580761b44d 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -102,6 +102,59 @@ static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 =
.codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1,
};
+/* SRIOV SOC21, not const since data is controlled by host */
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
+};
+
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+};
+
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn0 = {
+ .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn0),
+ .codec_array = sriov_vcn_4_0_0_video_codecs_encode_array_vcn0,
+};
+
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn1 = {
+ .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn1),
+ .codec_array = sriov_vcn_4_0_0_video_codecs_encode_array_vcn1,
+};
+
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn0[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
+};
+
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn1[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+};
+
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_decode_vcn0 = {
+ .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn0),
+ .codec_array = sriov_vcn_4_0_0_video_codecs_decode_array_vcn0,
+};
+
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_decode_vcn1 = {
+ .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn1),
+ .codec_array = sriov_vcn_4_0_0_video_codecs_decode_array_vcn1,
+};
+
static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
@@ -111,62 +164,38 @@ static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(4, 0, 0):
case IP_VERSION(4, 0, 2):
- if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
- if (encode)
- *codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
- else
- *codecs = &vcn_4_0_0_video_codecs_decode_vcn1;
+ case IP_VERSION(4, 0, 4):
+ if (amdgpu_sriov_vf(adev)) {
+ if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) ||
+ !amdgpu_sriov_is_av1_support(adev)) {
+ if (encode)
+ *codecs = &sriov_vcn_4_0_0_video_codecs_encode_vcn1;
+ else
+ *codecs = &sriov_vcn_4_0_0_video_codecs_decode_vcn1;
+ } else {
+ if (encode)
+ *codecs = &sriov_vcn_4_0_0_video_codecs_encode_vcn0;
+ else
+ *codecs = &sriov_vcn_4_0_0_video_codecs_decode_vcn0;
+ }
} else {
- if (encode)
- *codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
- else
- *codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
+ if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)) {
+ if (encode)
+ *codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
+ else
+ *codecs = &vcn_4_0_0_video_codecs_decode_vcn1;
+ } else {
+ if (encode)
+ *codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
+ else
+ *codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
+ }
}
return 0;
default:
return -EINVAL;
}
}
-/*
- * Indirect registers accessor
- */
-static u32 soc21_pcie_rreg(struct amdgpu_device *adev, u32 reg)
-{
- unsigned long address, data;
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- return amdgpu_device_indirect_rreg(adev, address, data, reg);
-}
-
-static void soc21_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
-{
- unsigned long address, data;
-
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- amdgpu_device_indirect_wreg(adev, address, data, reg, v);
-}
-
-static u64 soc21_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
-{
- unsigned long address, data;
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- return amdgpu_device_indirect_rreg64(adev, address, data, reg);
-}
-
-static void soc21_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
-{
- unsigned long address, data;
-
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
-}
static u32 soc21_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
@@ -291,9 +320,10 @@ static int soc21_read_register(struct amdgpu_device *adev, u32 se_num,
*value = 0;
for (i = 0; i < ARRAY_SIZE(soc21_allowed_read_registers); i++) {
en = &soc21_allowed_read_registers[i];
- if (adev->reg_offset[en->hwip][en->inst] &&
- reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
- + en->reg_offset))
+ if (!adev->reg_offset[en->hwip][en->inst])
+ continue;
+ else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ + en->reg_offset))
continue;
*value = soc21_get_register_value(adev,
@@ -410,21 +440,6 @@ static int soc21_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk
return 0;
}
-static void soc21_pcie_gen3_enable(struct amdgpu_device *adev)
-{
- if (pci_is_root_bus(adev->pdev->bus))
- return;
-
- if (amdgpu_pcie_gen2 == 0)
- return;
-
- if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
- CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
- return;
-
- /* todo */
-}
-
static void soc21_program_aspm(struct amdgpu_device *adev)
{
if (!amdgpu_device_should_use_aspm(adev))
@@ -451,11 +466,6 @@ const struct amdgpu_ip_block_version soc21_common_ip_block =
.funcs = &soc21_common_ip_funcs,
};
-static uint32_t soc21_get_rev_id(struct amdgpu_device *adev)
-{
- return adev->nbio.funcs->get_rev_id(adev);
-}
-
static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
switch (adev->ip_versions[GC_HWIP][0]) {
@@ -580,10 +590,10 @@ static int soc21_common_early_init(void *handle)
adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
- adev->pcie_rreg = &soc21_pcie_rreg;
- adev->pcie_wreg = &soc21_pcie_wreg;
- adev->pcie_rreg64 = &soc21_pcie_rreg64;
- adev->pcie_wreg64 = &soc21_pcie_wreg64;
+ adev->pcie_rreg = &amdgpu_device_indirect_rreg;
+ adev->pcie_wreg = &amdgpu_device_indirect_wreg;
+ adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
+ adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
@@ -596,7 +606,7 @@ static int soc21_common_early_init(void *handle)
adev->asic_funcs = &soc21_asic_funcs;
- adev->rev_id = soc21_get_rev_id(adev);
+ adev->rev_id = amdgpu_device_get_rev_id(adev);
adev->external_rev_id = 0xff;
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
@@ -728,8 +738,23 @@ static int soc21_common_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
xgpu_nv_mailbox_get_irq(adev);
+ if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) ||
+ !amdgpu_sriov_is_av1_support(adev)) {
+ amdgpu_virt_update_sriov_video_codec(adev,
+ sriov_vcn_4_0_0_video_codecs_encode_array_vcn1,
+ ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn1),
+ sriov_vcn_4_0_0_video_codecs_decode_array_vcn1,
+ ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn1));
+ } else {
+ amdgpu_virt_update_sriov_video_codec(adev,
+ sriov_vcn_4_0_0_video_codecs_encode_array_vcn0,
+ ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn0),
+ sriov_vcn_4_0_0_video_codecs_decode_array_vcn0,
+ ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn0));
+ }
+ }
return 0;
}
@@ -753,8 +778,6 @@ static int soc21_common_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* enable pcie gen2/3 link */
- soc21_pcie_gen3_enable(adev);
/* enable aspm */
soc21_program_aspm(adev);
/* setup nbio registers */
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h
index 25eaf4af5fcf..c6dfd433fec7 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h
@@ -31,9 +31,9 @@
/* number of umc instance with memory map register access */
#define UMC_V8_10_UMC_INSTANCE_NUM 2
-/* Total channel instances for all umc nodes */
+/* Total channel instances for all available umc nodes */
#define UMC_V8_10_TOTAL_CHANNEL_NUM(adev) \
- (UMC_V8_10_CHANNEL_INSTANCE_NUM * UMC_V8_10_UMC_INSTANCE_NUM * (adev)->umc.node_inst_num)
+ (UMC_V8_10_CHANNEL_INSTANCE_NUM * UMC_V8_10_UMC_INSTANCE_NUM * (adev)->gmc.num_umc)
/* UMC regiser per channel offset */
#define UMC_V8_10_PER_CHANNEL_OFFSET 0x400
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index b0b0e69c6a94..223e7dfe4618 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -225,6 +225,10 @@ static int vcn_v2_5_sw_init(void *handle)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
+ r = amdgpu_vcn_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -2031,6 +2035,4 @@ static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev)
default:
break;
}
-
- amdgpu_vcn_set_ras_funcs(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 43d587404c3e..720ab36f9c92 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -181,6 +181,10 @@ static int vcn_v4_0_sw_init(void *handle)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v4_0_pause_dpg_mode;
+ r = amdgpu_vcn_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -2123,6 +2127,4 @@ static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev)
default:
break;
}
-
- amdgpu_vcn_set_ras_funcs(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 1706081d054d..827e2768f867 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -321,7 +321,8 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
/* psp firmware won't program IH_CHICKEN for aldebaran
* driver needs to program it properly according to
* MC_SPACE type in IH_RB_CNTL */
- if (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 0)) {
+ if ((adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 0)) ||
+ (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 2))) {
ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN);
if (adev->irq.ih.use_bus_addr) {
ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
@@ -551,12 +552,14 @@ static int vega20_ih_sw_init(void *handle)
adev->irq.ih1.use_doorbell = true;
adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
- if (r)
- return r;
+ if (adev->ip_versions[OSSSYS_HWIP][0] != IP_VERSION(4, 4, 2)) {
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+ if (r)
+ return r;
- adev->irq.ih2.use_doorbell = true;
- adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+ adev->irq.ih2.use_doorbell = true;
+ adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+ }
/* initialize ih control registers offset */
vega20_ih_init_register_offset(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 12ef782eb478..531f173ade2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -81,10 +81,6 @@
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"
-#if IS_ENABLED(CONFIG_X86)
-#include <asm/intel-family.h>
-#endif
-
#define ixPCIE_LC_L1_PM_SUBSTATE 0x100100C6
#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK 0x00000001L
#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK 0x00000002L
@@ -1105,24 +1101,6 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
return 0;
}
-static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
-{
- if (pci_is_root_bus(adev->pdev->bus))
- return;
-
- if (amdgpu_pcie_gen2 == 0)
- return;
-
- if (adev->flags & AMD_IS_APU)
- return;
-
- if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
- CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
- return;
-
- /* todo */
-}
-
static void vi_enable_aspm(struct amdgpu_device *adev)
{
u32 data, orig;
@@ -1138,24 +1116,13 @@ static void vi_enable_aspm(struct amdgpu_device *adev)
WREG32_PCIE(ixPCIE_LC_CNTL, data);
}
-static bool aspm_support_quirk_check(void)
-{
-#if IS_ENABLED(CONFIG_X86)
- struct cpuinfo_x86 *c = &cpu_data(0);
-
- return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
-#else
- return true;
-#endif
-}
-
static void vi_program_aspm(struct amdgpu_device *adev)
{
u32 data, data1, orig;
bool bL1SS = false;
bool bClkReqSupport = true;
- if (!amdgpu_device_should_use_aspm(adev) || !aspm_support_quirk_check())
+ if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
return;
if (adev->flags & AMD_IS_APU ||
@@ -1743,8 +1710,6 @@ static int vi_common_hw_init(void *handle)
/* move the golden regs per IP block */
vi_init_golden_registers(adev);
- /* enable pcie gen2/3 link */
- vi_pcie_gen3_enable(adev);
/* enable aspm */
vi_program_aspm(adev);
/* enable the doorbell aperture */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index a0e30f21e12e..81d07ecf666d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1312,14 +1312,14 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
args->n_success = i+1;
}
- mutex_unlock(&p->mutex);
-
err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
}
+ mutex_unlock(&p->mutex);
+
/* Flush TLBs after waiting for the page table updates to complete */
for (i = 0; i < args->n_devices; i++) {
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
@@ -1335,9 +1335,9 @@ get_process_device_data_failed:
bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
map_memory_to_gpu_failed:
+sync_memory_failed:
mutex_unlock(&p->mutex);
copy_from_user_failed:
-sync_memory_failed:
kfree(devices_arr);
return err;
@@ -1351,6 +1351,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
void *mem;
long err = 0;
uint32_t *devices_arr = NULL, i;
+ bool flush_tlb;
if (!args->n_devices) {
pr_debug("Device IDs array empty\n");
@@ -1403,16 +1404,19 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
}
args->n_success = i+1;
}
- mutex_unlock(&p->mutex);
- if (kfd_flush_tlb_after_unmap(pdd->dev)) {
+ flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev);
+ if (flush_tlb) {
err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
(struct kgd_mem *) mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
}
+ }
+ mutex_unlock(&p->mutex);
+ if (flush_tlb) {
/* Flush TLBs after waiting for the page table updates to complete */
for (i = 0; i < args->n_devices; i++) {
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
@@ -1428,9 +1432,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
unmap_memory_from_gpu_failed:
+sync_memory_failed:
mutex_unlock(&p->mutex);
copy_from_user_failed:
-sync_memory_failed:
kfree(devices_arr);
return err;
}
@@ -1586,6 +1590,58 @@ err_unlock:
return r;
}
+static int kfd_ioctl_export_dmabuf(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_export_dmabuf_args *args = data;
+ struct kfd_process_device *pdd;
+ struct dma_buf *dmabuf;
+ struct kfd_dev *dev;
+ void *mem;
+ int ret = 0;
+
+ dev = kfd_device_by_id(GET_GPU_ID(args->handle));
+ if (!dev)
+ return -EINVAL;
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_get_process_device_data(dev, p);
+ if (!pdd) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ mem = kfd_process_device_translate_handle(pdd,
+ GET_IDR_HANDLE(args->handle));
+ if (!mem) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ ret = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
+ mutex_unlock(&p->mutex);
+ if (ret)
+ goto err_out;
+
+ ret = dma_buf_fd(dmabuf, args->flags);
+ if (ret < 0) {
+ dma_buf_put(dmabuf);
+ goto err_out;
+ }
+ /* dma_buf_fd transfers the dma-buf reference to the returned fd, so there
+ * is no need to put the reference here.
+ */
+ args->dmabuf_fd = ret;
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(&p->mutex);
+err_out:
+ return ret;
+}
+
/* Handle requests for watching SMI events */
static int kfd_ioctl_smi_events(struct file *filep,
struct kfd_process *p, void *data)
@@ -2768,6 +2824,9 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
AMDKFD_IOCTL_DEF(AMDKFD_IOC_AVAILABLE_MEMORY,
kfd_ioctl_get_available_memory, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_EXPORT_DMABUF,
+ kfd_ioctl_export_dmabuf, 0),
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
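/*
 * Minimal user-space sketch (not part of this patch) showing how the new
 * AMDKFD_IOC_EXPORT_DMABUF ioctl registered above could be exercised.
 * Assumptions: /dev/kfd is already open in `kfd_fd`, the uapi definitions
 * come from <linux/kfd_ioctl.h>, and `buf_handle` is a handle previously
 * returned by the KFD memory allocation ioctl for this process.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int export_kfd_buffer(int kfd_fd, __u64 buf_handle)
{
	struct kfd_ioctl_export_dmabuf_args args = {
		.handle = buf_handle,	/* encodes the gpu_id and per-process IDR handle */
		.flags = O_CLOEXEC,	/* forwarded to dma_buf_fd() by the driver */
	};

	if (ioctl(kfd_fd, AMDKFD_IOC_EXPORT_DMABUF, &args) < 0) {
		perror("AMDKFD_IOC_EXPORT_DMABUF");
		return -1;
	}

	/* The dma-buf reference is now owned by the returned fd. */
	return args.dmabuf_fd;
}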
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 3de7f616a001..ec70a1658dc3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -59,6 +59,7 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
+static int kfd_resume_iommu(struct kfd_dev *kfd);
static int kfd_resume(struct kfd_dev *kfd);
static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
@@ -624,7 +625,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
svm_migrate_init(kfd->adev);
- if (kgd2kfd_resume_iommu(kfd))
+ if (kfd_resume_iommu(kfd))
goto device_iommu_error;
if (kfd_resume(kfd))
@@ -773,6 +774,14 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
{
+ if (!kfd->init_complete)
+ return 0;
+
+ return kfd_resume_iommu(kfd);
+}
+
+static int kfd_resume_iommu(struct kfd_dev *kfd)
+{
int err = 0;
err = kfd_iommu_resume(kfd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index cbef2e147da5..38c9e1ca6691 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -280,7 +280,7 @@ phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd)
if (!pdd->doorbell_index) {
int r = kfd_alloc_process_doorbells(pdd->dev,
&pdd->doorbell_index);
- if (r)
+ if (r < 0)
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index de8ce72344fc..54933903bcb8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -289,7 +289,7 @@ static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
static int
svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
- dma_addr_t *scratch)
+ dma_addr_t *scratch, uint64_t ttm_res_offset)
{
uint64_t npages = migrate->npages;
struct device *dev = adev->dev;
@@ -299,19 +299,13 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
uint64_t i, j;
int r;
- pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
- prange->last);
+ pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
+ prange->last, ttm_res_offset);
src = scratch;
dst = (uint64_t *)(scratch + npages);
- r = svm_range_vram_node_new(adev, prange, true);
- if (r) {
- dev_dbg(adev->dev, "fail %d to alloc vram\n", r);
- goto out;
- }
-
- amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
+ amdgpu_res_first(prange->ttm_res, ttm_res_offset,
npages << PAGE_SHIFT, &cursor);
for (i = j = 0; i < npages; i++) {
struct page *spage;
@@ -391,14 +385,14 @@ out_free_vram_pages:
migrate->dst[i + 3] = 0;
}
#endif
-out:
+
return r;
}
static long
svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start,
- uint64_t end, uint32_t trigger)
+ uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
uint64_t npages = (end - start) >> PAGE_SHIFT;
@@ -451,7 +445,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
else
pr_debug("0x%lx pages migrated\n", cpages);
- r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
+ r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch, ttm_res_offset);
migrate_vma_pages(&migrate);
pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
@@ -499,6 +493,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
unsigned long addr, start, end;
struct vm_area_struct *vma;
struct amdgpu_device *adev;
+ uint64_t ttm_res_offset;
unsigned long cpages = 0;
long r = 0;
@@ -520,6 +515,13 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
start = prange->start << PAGE_SHIFT;
end = (prange->last + 1) << PAGE_SHIFT;
+ r = svm_range_vram_node_new(adev, prange, true);
+ if (r) {
+ dev_dbg(adev->dev, "fail %ld to alloc vram\n", r);
+ return r;
+ }
+ ttm_res_offset = prange->offset << PAGE_SHIFT;
+
for (addr = start; addr < end;) {
unsigned long next;
@@ -528,18 +530,21 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
break;
next = min(vma->vm_end, end);
- r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger);
+ r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger, ttm_res_offset);
if (r < 0) {
pr_debug("failed %ld to migrate\n", r);
break;
} else {
cpages += r;
}
+ ttm_res_offset += next - addr;
addr = next;
}
if (cpages)
prange->actual_loc = best_loc;
+ else
+ svm_range_vram_node_free(prange);
return r < 0 ? r : 0;
}
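/*
 * Illustration of the ttm_res_offset flow above (sizes are hypothetical):
 * the VRAM node is now allocated once in svm_migrate_ram_to_vram(), and a
 * range spanning two VMAs is copied piecewise into that single node. If the
 * first VMA covers 0x200000 bytes, the first svm_migrate_vma_to_vram() call
 * copies at ttm_res_offset == prange->offset << PAGE_SHIFT, the offset is
 * then advanced by 0x200000, and the second call continues from there. If
 * no pages migrate at all, the node is released via svm_range_vram_node_free().
 */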
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 09b966dc3768..aee2212e52f6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -77,6 +77,7 @@ err_ioctl:
static void kfd_exit(void)
{
+ kfd_cleanup_processes();
kfd_debugfs_fini();
kfd_process_destroy_wq();
kfd_procfs_shutdown();
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index bfa30d12406b..7e4d992e48b3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -928,6 +928,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev);
int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
+void kfd_cleanup_processes(void);
struct kfd_process *kfd_create_process(struct file *filep);
struct kfd_process *kfd_get_process(const struct task_struct *task);
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 7acd55a814b2..07a9eaf9b7d8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -344,7 +344,7 @@ static const struct sysfs_ops kfd_procfs_ops = {
.show = kfd_procfs_show,
};
-static struct kobj_type procfs_type = {
+static const struct kobj_type procfs_type = {
.release = kfd_procfs_kobj_release,
.sysfs_ops = &kfd_procfs_ops,
};
@@ -469,7 +469,7 @@ static const struct sysfs_ops procfs_queue_ops = {
.show = kfd_procfs_queue_show,
};
-static struct kobj_type procfs_queue_type = {
+static const struct kobj_type procfs_queue_type = {
.sysfs_ops = &procfs_queue_ops,
.default_groups = procfs_queue_groups,
};
@@ -478,7 +478,7 @@ static const struct sysfs_ops procfs_stats_ops = {
.show = kfd_procfs_stats_show,
};
-static struct kobj_type procfs_stats_type = {
+static const struct kobj_type procfs_stats_type = {
.sysfs_ops = &procfs_stats_ops,
.release = kfd_procfs_kobj_release,
};
@@ -487,7 +487,7 @@ static const struct sysfs_ops sysfs_counters_ops = {
.show = kfd_sysfs_counters_show,
};
-static struct kobj_type sysfs_counters_type = {
+static const struct kobj_type sysfs_counters_type = {
.sysfs_ops = &sysfs_counters_ops,
.release = kfd_procfs_kobj_release,
};
@@ -1167,6 +1167,17 @@ static void kfd_process_free_notifier(struct mmu_notifier *mn)
kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}
+static void kfd_process_notifier_release_internal(struct kfd_process *p)
+{
+ cancel_delayed_work_sync(&p->eviction_work);
+ cancel_delayed_work_sync(&p->restore_work);
+
+ /* Indicate to other users that MM is no longer valid */
+ p->mm = NULL;
+
+ mmu_notifier_put(&p->mmu_notifier);
+}
+
static void kfd_process_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
@@ -1181,17 +1192,22 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
return;
mutex_lock(&kfd_processes_mutex);
+ /*
+ * Return early if the table is empty.
+ *
+ * This could potentially happen if this function is called concurrently
+ * by the mmu_notifier and by kfd_cleanup_processes().
+ */
+ if (hash_empty(kfd_processes_table)) {
+ mutex_unlock(&kfd_processes_mutex);
+ return;
+ }
hash_del_rcu(&p->kfd_processes);
mutex_unlock(&kfd_processes_mutex);
synchronize_srcu(&kfd_processes_srcu);
- cancel_delayed_work_sync(&p->eviction_work);
- cancel_delayed_work_sync(&p->restore_work);
-
- /* Indicate to other users that MM is no longer valid */
- p->mm = NULL;
-
- mmu_notifier_put(&p->mmu_notifier);
+ kfd_process_notifier_release_internal(p);
}
static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
@@ -1200,6 +1216,43 @@ static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
.free_notifier = kfd_process_free_notifier,
};
+/*
+ * This code handles the case when the driver is being unloaded before all
+ * mm_structs are released. We need to safely free the kfd_process structs
+ * and avoid race conditions with the mmu_notifier that might try to free them.
+ */
+void kfd_cleanup_processes(void)
+{
+ struct kfd_process *p;
+ struct hlist_node *p_temp;
+ unsigned int temp;
+ HLIST_HEAD(cleanup_list);
+
+ /*
+ * Move all remaining kfd_process entries from the process table to a
+ * temporary list for processing. Once done, the mmu_notifier release
+ * callback will no longer find them in the table and will return early,
+ * avoiding double-free issues.
+ */
+ mutex_lock(&kfd_processes_mutex);
+ hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
+ hash_del_rcu(&p->kfd_processes);
+ synchronize_srcu(&kfd_processes_srcu);
+ hlist_add_head(&p->kfd_processes, &cleanup_list);
+ }
+ mutex_unlock(&kfd_processes_mutex);
+
+ hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
+ kfd_process_notifier_release_internal(p);
+
+ /*
+ * Ensure that all outstanding free_notifier callbacks get called, triggering
+ * the release of the kfd_process structs.
+ */
+ mmu_notifier_synchronize();
+}
+
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
unsigned long offset;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 5137476ec18e..4236539d9f93 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -218,8 +218,8 @@ static int init_user_queue(struct process_queue_manager *pqm,
return 0;
cleanup:
- if (dev->shared_resources.enable_mes)
- uninit_queue(*q);
+ uninit_queue(*q);
+ *q = NULL;
return retval;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 3fdaba56be6f..8e4124dcb6e4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -278,7 +278,7 @@ static const struct sysfs_ops sysprops_ops = {
.show = sysprops_show,
};
-static struct kobj_type sysprops_type = {
+static const struct kobj_type sysprops_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &sysprops_ops,
};
@@ -318,7 +318,7 @@ static const struct sysfs_ops iolink_ops = {
.show = iolink_show,
};
-static struct kobj_type iolink_type = {
+static const struct kobj_type iolink_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &iolink_ops,
};
@@ -350,7 +350,7 @@ static const struct sysfs_ops mem_ops = {
.show = mem_show,
};
-static struct kobj_type mem_type = {
+static const struct kobj_type mem_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &mem_ops,
};
@@ -395,7 +395,7 @@ static const struct sysfs_ops cache_ops = {
.show = kfd_cache_show,
};
-static struct kobj_type cache_type = {
+static const struct kobj_type cache_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &cache_ops,
};
@@ -566,7 +566,7 @@ static const struct sysfs_ops node_ops = {
.show = node_show,
};
-static struct kobj_type node_type = {
+static const struct kobj_type node_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &node_ops,
};
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 0c9bd0a53e60..06b438217c61 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -8,7 +8,7 @@ config DRM_AMD_DC
depends on BROKEN || !CC_IS_CLANG || X86_64 || SPARC64 || ARM64
select SND_HDA_COMPONENT if SND_HDA_CORE
# !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752
- select DRM_AMD_DC_DCN if (X86 || PPC_LONG_DOUBLE_128 || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG))
+ select DRM_AMD_DC_FP if (X86 || PPC64 || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG))
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
@@ -20,16 +20,10 @@ config DRM_AMD_DC
panic on most architectures. We'll revert this when the following bug report
has been resolved: https://github.com/llvm/llvm-project/issues/41896.
-config DRM_AMD_DC_DCN
+config DRM_AMD_DC_FP
def_bool n
help
- Raven, Navi, and newer family support for display engine
-
-config DRM_AMD_DC_HDCP
- bool "Enable HDCP support in DC"
- depends on DRM_AMD_DC
- help
- Choose this option if you want to support HDCP authentication.
+ Floating point support, required for DCN-based SoCs
config DRM_AMD_DC_SI
bool "AMD DC support for Southern Islands ASICs"
@@ -50,7 +44,7 @@ config DEBUG_KERNEL_DC
config DRM_AMD_SECURE_DISPLAY
bool "Enable secure display support"
depends on DEBUG_FS
- depends on DRM_AMD_DC_DCN
+ depends on DRM_AMD_DC_FP
help
Choose this option if you want to
support secure display
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index 2633de77de5e..0d610cb376bb 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -36,18 +36,14 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dmub/inc
-ifdef CONFIG_DRM_AMD_DC_HDCP
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/hdcp
-endif
#TODO: remove when Timing Sync feature is complete
subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power dmub/src
-ifdef CONFIG_DRM_AMD_DC_HDCP
DAL_LIBS += modules/hdcp
-endif
AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 90fb0f3cdb6f..249b073f6a23 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -33,7 +33,7 @@ AMDGPUDM = \
amdgpu_dm_mst_types.o \
amdgpu_dm_color.o
-ifdef CONFIG_DRM_AMD_DC_DCN
+ifdef CONFIG_DRM_AMD_DC_FP
AMDGPUDM += dc_fpu.o
endif
@@ -41,9 +41,7 @@ ifneq ($(CONFIG_DRM_AMD_DC),)
AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o
endif
-ifdef CONFIG_DRM_AMD_DC_HDCP
AMDGPUDM += amdgpu_dm_hdcp.o
-endif
ifneq ($(CONFIG_DEBUG_FS),)
AMDGPUDM += amdgpu_dm_crc.o amdgpu_dm_debugfs.o
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 009ef917dad4..a3645a3fd257 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -52,10 +52,8 @@
#include "amdgpu_dm.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_crtc.h"
-#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
-#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"
@@ -344,12 +342,52 @@ static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
{
if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
return true;
- else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
+ else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
return true;
else
return false;
}
+static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update,
+ int planes_count)
+{
+ int i, j;
+
+ for (i = 0, j = planes_count - 1; i < j; i++, j--)
+ swap(array_of_surface_update[i], array_of_surface_update[j]);
+}
+
+/**
+ * update_planes_and_stream_adapter() - Send planes to be updated in DC
+ *
+ * DC has a generic way to update planes and streams via the
+ * dc_update_planes_and_stream() function; however, DM might need some
+ * adjustments and preparation before calling it. This function is a wrapper
+ * for dc_update_planes_and_stream() that does any required configuration
+ * before passing control to DC.
+ */
+static inline bool update_planes_and_stream_adapter(struct dc *dc,
+ int update_type,
+ int planes_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ struct dc_surface_update *array_of_surface_update)
+{
+ reverse_planes_order(array_of_surface_update, planes_count);
+
+ /*
+ * Previous frame finished and HW is ready for optimization.
+ */
+ if (update_type == UPDATE_TYPE_FAST)
+ dc_post_update_surfaces_to_stream(dc);
+
+ return dc_update_planes_and_stream(dc,
+ array_of_surface_update,
+ planes_count,
+ stream,
+ stream_update);
+}
+
/**
* dm_pflip_high_irq() - Handle pageflip interrupt
* @interrupt_params: ignored
@@ -394,7 +432,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
WARN_ON(!e);
- vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
+ vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);
/* Fixed refresh rate, or VRR scanout position outside front-porch? */
if (!vrr_active ||
@@ -468,7 +506,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
if (acrtc) {
- vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
+ vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
drm_dev = acrtc->base.dev;
vblank = &drm_dev->vblank[acrtc->base.index];
previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
@@ -492,7 +530,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
* if a pageflip happened inside front-porch.
*/
if (vrr_active) {
- dm_crtc_handle_vblank(acrtc);
+ amdgpu_dm_crtc_handle_vblank(acrtc);
/* BTR processing for pre-DCE12 ASICs */
if (acrtc->dm_irq_params.stream &&
@@ -532,7 +570,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
if (!acrtc)
return;
- vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
+ vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
vrr_active, acrtc->dm_irq_params.active_planes);
@@ -544,7 +582,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
* to dm_vupdate_high_irq after end of front-porch.
*/
if (!vrr_active)
- dm_crtc_handle_vblank(acrtc);
+ amdgpu_dm_crtc_handle_vblank(acrtc);
/**
* Following stuff must happen at start of vblank, for crc
@@ -675,7 +713,14 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
if (link && aconnector->dc_link == link) {
- DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
+ if (notify->type == DMUB_NOTIFICATION_HPD)
+ DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
+ else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+ DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
+ else
+ DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
+ notify->type, link_index);
+
hpd_aconnector = aconnector;
break;
}
@@ -1488,9 +1533,7 @@ static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct dc_callback_init init_params;
-#endif
int r;
adev->dm.ddev = adev_to_drm(adev);
@@ -1498,9 +1541,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
/* Zero all the fields */
memset(&init_data, 0, sizeof(init_data));
-#ifdef CONFIG_DRM_AMD_DC_HDCP
memset(&init_params, 0, sizeof(init_params));
-#endif
mutex_init(&adev->dm.dpia_aux_lock);
mutex_init(&adev->dm.dc_lock);
@@ -1726,7 +1767,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
}
-#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
@@ -1737,7 +1777,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_init_callbacks(adev->dm.dc, &init_params);
}
-#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
if (!adev->dm.secure_display_ctxs) {
@@ -1844,7 +1883,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
adev->dm.secure_display_ctxs = NULL;
}
#endif
-#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.hdcp_workqueue) {
hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
adev->dm.hdcp_workqueue = NULL;
@@ -1852,7 +1890,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
if (adev->dm.dc)
dc_deinit_callbacks(adev->dm.dc);
-#endif
dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
@@ -2273,7 +2310,7 @@ static int dm_late_init(void *handle)
struct dc_link *edp_links[MAX_NUM_EDP];
int edp_num;
- get_edp_links(adev->dm.dc, edp_links, &edp_num);
+ dc_get_edp_links(adev->dm.dc, edp_links, &edp_num);
for (i = 0; i < edp_num; i++) {
if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
return -EINVAL;
@@ -2449,11 +2486,11 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
enable ? "enable" : "disable");
if (enable) {
- rc = dm_enable_vblank(&acrtc->base);
+ rc = amdgpu_dm_crtc_enable_vblank(&acrtc->base);
if (rc)
DRM_WARN("Failed to enable vblank interrupts\n");
} else {
- dm_disable_vblank(&acrtc->base);
+ amdgpu_dm_crtc_disable_vblank(&acrtc->base);
}
}
@@ -2496,7 +2533,7 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
goto fail;
}
- res = dc_commit_state(dc, context);
+ res = dc_commit_streams(dc, context->streams, context->stream_count);
fail:
dc_release_state(context);
@@ -2682,10 +2719,13 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
bundle->surface_updates[m].surface->force_full_update =
true;
}
- dc_commit_updates_for_stream(
- dm->dc, bundle->surface_updates,
- dc_state->stream_status->plane_count,
- dc_state->streams[k], &bundle->stream_update, dc_state);
+
+ update_planes_and_stream_adapter(dm->dc,
+ UPDATE_TYPE_FULL,
+ dc_state->stream_status->plane_count,
+ dc_state->streams[k],
+ &bundle->stream_update,
+ bundle->surface_updates);
}
cleanup:
@@ -2755,7 +2795,7 @@ static int dm_resume(void *handle)
dc_enable_dmub_outbox(adev->dm.dc);
}
- WARN_ON(!dc_commit_state(dm->dc, dc_state));
+ WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
dm_gpureset_commit_state(dm->cached_dc_state, dm);
@@ -2923,7 +2963,7 @@ const struct amdgpu_ip_block_version dm_ip_block =
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
.fb_create = amdgpu_display_user_framebuffer_create,
- .get_format_info = amd_get_format_info,
+ .get_format_info = amdgpu_dm_plane_get_format_info,
.atomic_check = amdgpu_dm_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -2974,8 +3014,14 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps->aux_support = true;
luminance_range = &conn_base->display_info.luminance_range;
- caps->aux_min_input_signal = luminance_range->min_luminance;
- caps->aux_max_input_signal = luminance_range->max_luminance;
+
+ if (luminance_range->max_luminance) {
+ caps->aux_min_input_signal = luminance_range->min_luminance;
+ caps->aux_max_input_signal = luminance_range->max_luminance;
+ } else {
+ caps->aux_min_input_signal = 0;
+ caps->aux_max_input_signal = 512;
+ }
}
void amdgpu_dm_update_connector_after_detect(
@@ -3111,11 +3157,9 @@ void amdgpu_dm_update_connector_after_detect(
aconnector->edid = NULL;
kfree(aconnector->timing_requested);
aconnector->timing_requested = NULL;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-#endif
}
mutex_unlock(&dev->mode_config.mutex);
@@ -3132,9 +3176,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
struct amdgpu_device *adev = drm_to_adev(dev);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
-#endif
bool ret = false;
if (adev->dm.disable_hpd_irq)
@@ -3146,12 +3188,10 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
*/
mutex_lock(&aconnector->hpd_lock);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.hdcp_workqueue) {
hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
dm_con_state->update_hdcp = true;
}
-#endif
if (aconnector->fake_enable)
aconnector->fake_enable = false;
@@ -3398,12 +3438,10 @@ out:
}
}
}
-#ifdef CONFIG_DRM_AMD_DC_HDCP
if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
if (adev->dm.hdcp_workqueue)
hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
}
-#endif
if (dc_link->type != dc_connection_mst_branch)
drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
@@ -4320,9 +4358,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
continue;
- if (!plane->blends_with_above || !plane->blends_with_below)
- continue;
-
if (!plane->pixel_format_support.argb8888)
continue;
@@ -4947,7 +4982,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
if (ret)
return ret;
- ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
+ ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format,
plane_info->rotation, tiling_flags,
&plane_info->tiling_info,
&plane_info->plane_size,
@@ -4956,7 +4991,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
if (ret)
return ret;
- fill_blending_from_plane_state(
+ amdgpu_dm_plane_fill_blending_from_plane_state(
plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
&plane_info->global_alpha, &plane_info->global_alpha_value);
@@ -4975,7 +5010,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
int ret;
bool force_disable_dcc = false;
- ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
+ ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
if (ret)
return ret;
@@ -5105,9 +5140,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
for (; flip_addrs->dirty_rect_count < num_clips; clips++)
fill_dc_dirty_rect(new_plane_state->plane,
- &dirty_rects[i], clips->x1,
- clips->y1, clips->x2 - clips->x1,
- clips->y2 - clips->y1,
+ &dirty_rects[flip_addrs->dirty_rect_count],
+ clips->x1, clips->y1,
+ clips->x2 - clips->x1, clips->y2 - clips->y1,
&flip_addrs->dirty_rect_count,
false);
return;
@@ -5753,7 +5788,6 @@ static bool is_freesync_video_mode(const struct drm_display_mode *mode,
return true;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
struct dc_sink *sink, struct dc_stream_state *stream,
struct dsc_dec_dpcd_caps *dsc_caps)
@@ -5784,6 +5818,10 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
struct dc *dc = sink->ctx->dc;
struct dc_dsc_bw_range bw_range = {0};
struct dc_dsc_config dsc_cfg = {0};
+ struct dc_dsc_config_options dsc_options = {0};
+
+ dc_dsc_get_default_config_option(dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
verified_link_cap = dc_link_get_link_cap(stream->link);
link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
@@ -5806,8 +5844,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
if (bw_range.max_kbps < link_bw_in_kbps) {
if (dc_dsc_compute_config(dc->res_pool->dscs[0],
dsc_caps,
- dc->debug.dsc_min_slice_height_override,
- max_dsc_target_bpp_limit_override,
+ &dsc_options,
0,
&stream->timing,
&dsc_cfg)) {
@@ -5821,8 +5858,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
if (dc_dsc_compute_config(dc->res_pool->dscs[0],
dsc_caps,
- dc->debug.dsc_min_slice_height_override,
- max_dsc_target_bpp_limit_override,
+ &dsc_options,
link_bw_in_kbps,
&stream->timing,
&dsc_cfg)) {
@@ -5843,6 +5879,10 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
u32 dsc_max_supported_bw_in_kbps;
u32 max_dsc_target_bpp_limit_override =
drm_connector->display_info.max_dsc_bpp;
+ struct dc_dsc_config_options dsc_options = {0};
+
+ dc_dsc_get_default_config_option(dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
dc_link_get_link_cap(aconnector->dc_link));
@@ -5861,8 +5901,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
dsc_caps,
- aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
- max_dsc_target_bpp_limit_override,
+ &dsc_options,
link_bandwidth_kbps,
&stream->timing,
&stream->timing.dsc_cfg)) {
@@ -5879,8 +5918,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dsc_max_supported_bw_in_kbps > 0)
if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
dsc_caps,
- aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
- max_dsc_target_bpp_limit_override,
+ &dsc_options,
dsc_max_supported_bw_in_kbps,
&stream->timing,
&stream->timing.dsc_cfg)) {
@@ -5904,7 +5942,6 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
}
-#endif /* CONFIG_DRM_AMD_DC_DCN */
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
@@ -5927,9 +5964,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
int mode_refresh;
int preferred_refresh = 0;
enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dsc_dec_dpcd_caps dsc_caps;
-#endif
struct dc_sink *sink = NULL;
@@ -6028,12 +6063,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
stream->timing = *aconnector->timing_requested;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/* SST DSC determination policy */
update_dsc_caps(aconnector, sink, stream, &dsc_caps);
if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
-#endif
update_stream_scaling_settings(&mode, dm_state, stream);
@@ -6759,7 +6792,6 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
.atomic_check = dm_encoder_helper_atomic_check
};
-#if defined(CONFIG_DRM_AMD_DC_DCN)
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
struct dc_state *dc_state,
struct dsc_mst_fairness_vars *vars)
@@ -6833,7 +6865,6 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
}
return 0;
}
-#endif
static int to_drm_connector_type(enum signal_type st)
{
@@ -7158,12 +7189,18 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
to_amdgpu_dm_connector(connector);
struct drm_encoder *encoder;
struct edid *edid = amdgpu_dm_connector->edid;
+ struct dc_link_settings *verified_link_cap =
+ &amdgpu_dm_connector->dc_link->verified_link_cap;
+ const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
encoder = amdgpu_dm_connector_to_encoder(connector);
if (!drm_edid_is_valid(edid)) {
amdgpu_dm_connector->num_modes =
drm_add_modes_noedid(connector, 640, 480);
+ if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
+ amdgpu_dm_connector->num_modes +=
+ drm_add_modes_noedid(connector, 1920, 1080);
} else {
amdgpu_dm_connector_ddc_get_modes(connector, edid);
amdgpu_dm_connector_add_common_modes(encoder, connector);
@@ -7244,7 +7281,6 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
if (!aconnector->mst_root)
drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
- /* This defaults to the max in the range, but we want 8bpc for non-edp. */
aconnector->base.state->max_bpc = 16;
aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
@@ -7262,10 +7298,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
if (!aconnector->mst_root)
drm_connector_attach_vrr_capable_property(&aconnector->base);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.hdcp_workqueue)
drm_connector_attach_content_protection_property(&aconnector->base, true);
-#endif
}
}
@@ -7527,7 +7561,6 @@ is_scaling_state_different(const struct dm_connector_state *dm_state,
return false;
}
-#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
struct drm_crtc_state *old_crtc_state,
struct drm_connector_state *new_conn_state,
@@ -7647,7 +7680,6 @@ static bool is_content_protection_different(struct drm_crtc_state *new_crtc_stat
pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
return false;
}
-#endif
static void remove_stream(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
@@ -7716,7 +7748,7 @@ static void update_freesync_state_on_stream(
&vrr_params);
if (adev->family < AMDGPU_FAMILY_AI &&
- amdgpu_dm_vrr_active(new_crtc_state)) {
+ amdgpu_dm_crtc_vrr_active(new_crtc_state)) {
mod_freesync_handle_v_update(dm->freesync_module,
new_stream, &vrr_params);
@@ -7834,8 +7866,8 @@ static void update_stream_irq_parameters(
static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
struct dm_crtc_state *new_state)
{
- bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
- bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
+ bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state);
+ bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state);
if (!old_vrr_active && new_vrr_active) {
/* Transition VRR inactive -> active:
@@ -7846,7 +7878,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
* We also need vupdate irq for the actual core vblank handling
* at end of vblank.
*/
- WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, true) != 0);
+ WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
__func__, new_state->base.crtc->base.id);
@@ -7854,7 +7886,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
/* Transition VRR active -> inactive:
* Allow vblank irq disable again for fixed refresh rate.
*/
- WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, false) != 0);
+ WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
drm_crtc_vblank_put(new_state->base.crtc);
DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
__func__, new_state->base.crtc->base.id);
@@ -7873,7 +7905,7 @@ static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
*/
for_each_old_plane_in_state(state, plane, old_plane_state, i)
if (plane->type == DRM_PLANE_TYPE_CURSOR)
- handle_cursor_update(plane, old_plane_state);
+ amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state);
}
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
@@ -7896,7 +7928,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
int planes_count = 0, vpos, hpos;
unsigned long flags;
u32 target_vblank, last_flip_vblank;
- bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
+ bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
bool cursor_update = false;
bool pflip_present = false;
bool dirty_rects_changed = false;
@@ -7958,7 +7990,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
}
- fill_dc_scaling_info(dm->adev, new_plane_state,
+ amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state,
&bundle->scaling_infos[planes_count]);
bundle->surface_updates[planes_count].scaling_info =
@@ -8178,12 +8210,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
acrtc_state->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(acrtc_state->stream);
- dc_commit_updates_for_stream(dm->dc,
- bundle->surface_updates,
- planes_count,
- acrtc_state->stream,
- &bundle->stream_update,
- dc_state);
+ update_planes_and_stream_adapter(dm->dc,
+ acrtc_state->update_type,
+ planes_count,
+ acrtc_state->stream,
+ &bundle->stream_update,
+ bundle->surface_updates);
/**
* Enable or disable the interrupts on the backend.
@@ -8446,7 +8478,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* aconnector as needed
*/
- if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
+ if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
@@ -8501,7 +8533,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_enable_per_frame_crtc_master_sync(dc_state);
mutex_lock(&dm->dc_lock);
- WARN_ON(!dc_commit_state(dm->dc, dc_state));
+ WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
/* Allow idle optimization when vblank count is 0 for display off */
if (dm->active_vblank_irq_count == 0)
@@ -8527,7 +8559,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
acrtc->otg_inst = status->primary_otg_inst;
}
}
-#ifdef CONFIG_DRM_AMD_DC_HDCP
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
@@ -8638,7 +8669,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
new_con_state->hdcp_content_type, enable_encryption);
}
}
-#endif
/* Handle connector state changes */
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
@@ -8715,12 +8745,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
mutex_lock(&dm->dc_lock);
- dc_commit_updates_for_stream(dm->dc,
- dummy_updates,
- status->plane_count,
- dm_new_crtc_state->stream,
- &stream_update,
- dc_state);
+ dc_update_planes_and_stream(dm->dc,
+ dummy_updates,
+ status->plane_count,
+ dm_new_crtc_state->stream,
+ &stream_update);
mutex_unlock(&dm->dc_lock);
}
@@ -9274,7 +9303,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (modereset_required(new_crtc_state))
goto skip_modeset;
- if (modeset_required(new_crtc_state, new_stream,
+ if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream,
dm_old_crtc_state->stream)) {
WARN_ON(dm_new_crtc_state->stream);
@@ -9625,7 +9654,7 @@ static int dm_update_plane_state(struct dc *dc,
if (!needs_reset)
return 0;
- ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
+ ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
if (ret)
return ret;
@@ -9771,7 +9800,6 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
return 0;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
struct drm_connector *connector;
@@ -9797,7 +9825,6 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
}
-#endif
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
@@ -9841,11 +9868,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
bool lock_and_validation_needed = false;
bool is_top_most_overlay = true;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
struct drm_dp_mst_topology_mgr *mgr;
struct drm_dp_mst_topology_state *mst_state;
struct dsc_mst_fairness_vars vars[MAX_PIPES];
-#endif
trace_amdgpu_dm_atomic_check_begin(state);
@@ -9876,7 +9901,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_crtc_state->connectors_changed = true;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dc_resource_is_dsc_encoding_supported(dc)) {
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
@@ -9888,7 +9912,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
}
-#endif
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
@@ -10026,13 +10049,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dc_resource_is_dsc_encoding_supported(dc)) {
ret = pre_validate_dsc(state, &dm_state, vars);
if (ret != 0)
goto fail;
}
-#endif
/* Run this here since we want to validate the streams we created */
ret = drm_atomic_helper_check_planes(dev, state);
@@ -10098,7 +10119,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
lock_and_validation_needed = true;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/* set the slot info for each mst_state based on the link encoding format */
for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
struct amdgpu_dm_connector *aconnector;
@@ -10118,7 +10138,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
drm_connector_list_iter_end(&iter);
}
-#endif
/**
* Streams and planes are reset when there are changes that affect
@@ -10146,7 +10165,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
if (ret) {
DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
@@ -10158,7 +10176,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
goto fail;
}
-#endif
/*
* Perform validation of MST topology in the state:
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index ed5cbe9da40c..904f9e2fd35b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -461,9 +461,7 @@ struct amdgpu_display_manager {
struct amdgpu_dm_backlight_caps backlight_caps[AMDGPU_DM_MAX_NUM_EDP];
struct mod_freesync *freesync_module;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct hdcp_workqueue *hdcp_workqueue;
-#endif
/**
* @vblank_control_workqueue:
@@ -747,9 +745,7 @@ struct dm_connector_state {
uint8_t underscan_hborder;
bool underscan_enable;
bool freesync_capable;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
bool update_hdcp;
-#endif
uint8_t abm_level;
int vcpi_slots;
uint64_t pbn;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index dc4f37240beb..1d924dc51a3e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -34,7 +34,7 @@
#include "amdgpu_dm_trace.h"
#include "amdgpu_dm_debugfs.h"
-void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
+void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
{
struct drm_crtc *crtc = &acrtc->base;
struct drm_device *dev = crtc->dev;
@@ -54,14 +54,14 @@ void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-bool modeset_required(struct drm_crtc_state *crtc_state,
+bool amdgpu_dm_crtc_modeset_required(struct drm_crtc_state *crtc_state,
struct dc_stream_state *new_stream,
struct dc_stream_state *old_stream)
{
return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}
-bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
+bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
return acrtc->dm_irq_params.freesync_config.state ==
@@ -70,7 +70,7 @@ bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
VRR_STATE_ACTIVE_FIXED;
}
-int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
+int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
enum dc_irq_source irq_source;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
@@ -89,7 +89,7 @@ int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
return rc;
}
-bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
+bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state)
{
return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
@@ -159,11 +159,11 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
if (enable) {
/* vblank irq on -> Only need vupdate irq in vrr mode */
- if (amdgpu_dm_vrr_active(acrtc_state))
- rc = dm_set_vupdate_irq(crtc, true);
+ if (amdgpu_dm_crtc_vrr_active(acrtc_state))
+ rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
} else {
/* vblank irq off -> vupdate irq off */
- rc = dm_set_vupdate_irq(crtc, false);
+ rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false);
}
if (rc)
@@ -199,12 +199,12 @@ skip:
return 0;
}
-int dm_enable_vblank(struct drm_crtc *crtc)
+int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc)
{
return dm_set_vblank(crtc, true);
}
-void dm_disable_vblank(struct drm_crtc *crtc)
+void amdgpu_dm_crtc_disable_vblank(struct drm_crtc *crtc)
{
dm_set_vblank(crtc, false);
}
@@ -300,8 +300,8 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
.get_vblank_counter = amdgpu_get_vblank_counter_kms,
- .enable_vblank = dm_enable_vblank,
- .disable_vblank = dm_disable_vblank,
+ .enable_vblank = amdgpu_dm_crtc_enable_vblank,
+ .disable_vblank = amdgpu_dm_crtc_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
#if defined(CONFIG_DEBUG_FS)
.late_register = amdgpu_dm_crtc_late_register,
@@ -381,7 +381,7 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
dm_update_crtc_active_planes(crtc, crtc_state);
if (WARN_ON(unlikely(!dm_crtc_state->stream &&
- modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
+ amdgpu_dm_crtc_modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
return ret;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
index 1ac8692354cf..17e948753f59 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
@@ -27,21 +27,21 @@
#ifndef __AMDGPU_DM_CRTC_H__
#define __AMDGPU_DM_CRTC_H__
-void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc);
+void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc);
-bool modeset_required(struct drm_crtc_state *crtc_state,
+bool amdgpu_dm_crtc_modeset_required(struct drm_crtc_state *crtc_state,
struct dc_stream_state *new_stream,
struct dc_stream_state *old_stream);
-int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable);
+int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable);
-bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc);
+bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc);
-bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state);
+bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state);
-int dm_enable_vblank(struct drm_crtc *crtc);
+int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc);
-void dm_disable_vblank(struct drm_crtc *crtc);
+void amdgpu_dm_crtc_disable_vblank(struct drm_crtc *crtc);
int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 09a3efa517da..827fcb4fb3b3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -724,7 +724,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++)
link_training_settings.hw_lane_settings[i] = link->cur_lane_setting[i];
- dc_link_set_test_pattern(
+ dc_link_dp_set_test_pattern(
link,
test_pattern,
DP_TEST_PATTERN_COLOR_SPACE_RGB,
@@ -947,7 +947,6 @@ static ssize_t dp_dsc_passthrough_set(struct file *f, const char __user *buf,
return 0;
}
-#ifdef CONFIG_DRM_AMD_DC_HDCP
/*
* Returns the HDCP capability of the Display (1.4 for now).
*
@@ -984,7 +983,6 @@ static int hdcp_sink_capability_show(struct seq_file *m, void *data)
return 0;
}
-#endif
/*
* Returns whether the connected display is internal and not hotpluggable.
@@ -2593,9 +2591,7 @@ DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support);
DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
-#endif
DEFINE_SHOW_ATTRIBUTE(internal_display);
DEFINE_SHOW_ATTRIBUTE(psr_capability);
DEFINE_SHOW_ATTRIBUTE(dp_is_mst_connector);
@@ -2726,9 +2722,7 @@ static const struct {
{"phy_settings", &dp_phy_settings_debugfs_fop},
{"lttpr_status", &dp_lttpr_status_fops},
{"test_pattern", &dp_phy_test_pattern_fops},
-#ifdef CONFIG_DRM_AMD_DC_HDCP
{"hdcp_sink_capability", &hdcp_sink_capability_fops},
-#endif
{"sdp_message", &sdp_message_fops},
{"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
{"aux_dpcd_size", &dp_dpcd_size_debugfs_fops},
@@ -2749,14 +2743,13 @@ static const struct {
{"is_dpia_link", &is_dpia_link_fops}
};
-#ifdef CONFIG_DRM_AMD_DC_HDCP
static const struct {
char *name;
const struct file_operations *fops;
} hdmi_debugfs_entries[] = {
{"hdcp_sink_capability", &hdcp_sink_capability_fops}
};
-#endif
+
/*
* Force YUV420 output if available from the given mode
*/
@@ -2801,6 +2794,22 @@ static int psr_get(void *data, u64 *val)
}
/*
+ * Read PSR state residency
+ */
+static int psr_read_residency(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+ u32 residency;
+
+ link->dc->link_srv->edp_get_psr_residency(link, &residency);
+
+ *val = (u64)residency;
+
+ return 0;
+}
+
+/*
* Set dmcub trace event IRQ enable or disable.
* Usage to enable dmcub trace event IRQ: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en
* Usage to disable dmcub trace event IRQ: echo 0 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en
@@ -2835,6 +2844,8 @@ DEFINE_DEBUGFS_ATTRIBUTE(dmcub_trace_event_state_fops, dmcub_trace_event_state_g
dmcub_trace_event_state_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(psr_residency_fops, psr_read_residency, NULL,
+ "%llu\n");
DEFINE_SHOW_ATTRIBUTE(current_backlight);
DEFINE_SHOW_ATTRIBUTE(target_backlight);
@@ -2998,6 +3009,8 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops);
debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
+ debugfs_create_file_unsafe("psr_residency", 0444, dir,
+ connector, &psr_residency_fops);
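For reference, a minimal sketch of the read-only debugfs pattern the new psr_residency entry follows; names prefixed example_ are hypothetical, only DEFINE_DEBUGFS_ATTRIBUTE() and debugfs_create_file_unsafe() are the kernel APIs used above.

static int example_residency_get(void *data, u64 *val)
{
	/* 'data' is the private pointer handed to debugfs_create_file_unsafe();
	 * a real callback would query the link here, as psr_read_residency() does. */
	*val = 0;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(example_residency_fops, example_residency_get, NULL, "%llu\n");

/* registered from connector_debugfs_init(), e.g.:
 * debugfs_create_file_unsafe("example_residency", 0444, dir, connector,
 *                            &example_residency_fops);
 */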
debugfs_create_file("amdgpu_current_backlight_pwm", 0444, dir, connector,
&current_backlight_fops);
debugfs_create_file("amdgpu_target_backlight_pwm", 0444, dir, connector,
@@ -3015,7 +3028,6 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
connector->debugfs_dpcd_address = 0;
connector->debugfs_dpcd_size = 0;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) {
for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_entries); i++) {
debugfs_create_file(hdmi_debugfs_entries[i].name,
@@ -3023,7 +3035,6 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
hdmi_debugfs_entries[i].fops);
}
}
-#endif
}
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 8e572f07ec47..5536d17306d0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -559,9 +559,10 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
link->dp.assr_enabled = config->assr_enabled;
link->dp.mst_enabled = config->mst_enabled;
+ link->dp.dp2_enabled = config->dp2_enabled;
link->dp.usb4_enabled = config->usb4_enabled;
display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
- link->adjust.auth_delay = 0;
+ link->adjust.auth_delay = 2;
link->adjust.hdcp1.disable = 0;
conn_state = aconnector->base.state;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 1583157da355..9c1e91c2179e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -177,6 +177,40 @@ void dm_helpers_dp_update_branch_info(
const struct dc_link *link)
{}
+static void dm_helpers_construct_old_payload(
+ struct dc_link *link,
+ int pbn_per_slot,
+ struct drm_dp_mst_atomic_payload *new_payload,
+ struct drm_dp_mst_atomic_payload *old_payload)
+{
+ struct link_mst_stream_allocation_table current_link_table =
+ link->mst_stream_alloc_table;
+ struct link_mst_stream_allocation *dc_alloc;
+ int i;
+
+ *old_payload = *new_payload;
+
+ /* Set the correct time_slots/PBN of the old payload.
+ * The other fields (delete & dsc_enabled) in
+ * struct drm_dp_mst_atomic_payload are don't-care fields
+ * when calling drm_dp_remove_payload().
+ */
+ for (i = 0; i < current_link_table.stream_count; i++) {
+ dc_alloc =
+ &current_link_table.stream_allocations[i];
+
+ if (dc_alloc->vcp_id == new_payload->vcpi) {
+ old_payload->time_slots = dc_alloc->slot_count;
+ old_payload->pbn = dc_alloc->slot_count * pbn_per_slot;
+ break;
+ }
+ }
+
+ /* make sure there is an old payload */
+ ASSERT(i != current_link_table.stream_count);
+
+}
+
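Illustrative numbers only (not from a real topology): the lookup above keys on the VCPI and rebuilds the old payload's bandwidth from the DC table's slot count.

/* e.g. a DC allocation entry with vcp_id == new_payload->vcpi and
 * slot_count == 10, combined with pbn_per_slot == 60, yields: */
old_payload->time_slots = 10;	/* dc_alloc->slot_count */
old_payload->pbn = 10 * 60;	/* slot_count * pbn_per_slot = 600 PBN */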
/*
* Writes payload allocation table in immediate downstream device.
*/
@@ -188,7 +222,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
{
struct amdgpu_dm_connector *aconnector;
struct drm_dp_mst_topology_state *mst_state;
- struct drm_dp_mst_atomic_payload *payload;
+ struct drm_dp_mst_atomic_payload *target_payload, *new_payload, old_payload;
struct drm_dp_mst_topology_mgr *mst_mgr;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
@@ -204,17 +238,26 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
/* It's OK for this to fail */
- payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
- if (enable)
- drm_dp_add_payload_part1(mst_mgr, mst_state, payload);
- else
- drm_dp_remove_payload(mst_mgr, mst_state, payload, payload);
+ new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
+
+ if (enable) {
+ target_payload = new_payload;
+
+ drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload);
+ } else {
+ /* construct old payload by VCPI */
+ dm_helpers_construct_old_payload(stream->link, mst_state->pbn_div,
+ new_payload, &old_payload);
+ target_payload = &old_payload;
+
+ drm_dp_remove_payload(mst_mgr, mst_state, &old_payload, new_payload);
+ }
/* mst_mgr->->payloads are VC payload notify MST branch using DPCD or
* AUX message. The sequence is slot 1-63 allocated sequence for each
* stream. AMD ASIC stream slot allocation should follow the same
* sequence. copy DRM MST allocation to dc */
- fill_dc_mst_payload_table_from_drm(stream->link, enable, payload, proposed_table);
+ fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table);
return true;
}
@@ -468,8 +511,8 @@ bool dm_helpers_dp_read_dpcd(
return false;
}
- return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
- data, size) > 0;
+ return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, data,
+ size) == size;
}
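A short illustration of why the stricter return check matters; the aux pointer below stands in for any struct drm_dp_aux. drm_dp_dpcd_read() returns the number of bytes actually transferred (or a negative error), so a partial read satisfies "> 0" but not "== size".

u8 dpcd[4];
ssize_t ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));

/* ret == 3 (a short transfer) would have passed "ret > 0", but only
 * ret == sizeof(dpcd) guarantees the whole buffer was filled. */
if (ret != sizeof(dpcd))
	return false;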
bool dm_helpers_dp_write_dpcd(
@@ -525,7 +568,6 @@ bool dm_helpers_submit_i2c(
return result;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
bool is_write_cmd,
unsigned char cmd,
@@ -693,7 +735,6 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
return ret;
}
-#endif
bool dm_helpers_dp_write_dsc_enable(
struct dc_context *ctx,
@@ -719,13 +760,11 @@ bool dm_helpers_dp_write_dsc_enable(
if (!aconnector->dsc_aux)
return false;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
// apply w/a to synaptics
if (needs_dsc_aux_workaround(aconnector->dc_link) &&
(aconnector->mst_downstream_port_present.byte & 0x7) != 0x3)
return write_dsc_enable_synaptics_non_virtual_dpcd_mst(
aconnector->dsc_aux, stream, enable_dsc);
-#endif
port = aconnector->mst_output_port;
@@ -763,17 +802,13 @@ bool dm_helpers_dp_write_dsc_enable(
}
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
-#endif
ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable");
-#if defined(CONFIG_DRM_AMD_DC_DCN)
} else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc ? "enable" : "disable");
}
-#endif
}
return ret;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index e25e1b2bf194..6378352346c8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -31,10 +31,7 @@
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_mst_types.h"
-
-#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
-#endif
#include "dc.h"
#include "dm_helpers.h"
@@ -201,7 +198,6 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
.early_unregister = amdgpu_dm_mst_connector_early_unregister,
};
-#if defined(CONFIG_DRM_AMD_DC_DCN)
bool needs_dsc_aux_workaround(struct dc_link *link)
{
if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
@@ -212,6 +208,21 @@ bool needs_dsc_aux_workaround(struct dc_link *link)
return false;
}
+bool is_synaptics_cascaded_panamera(struct dc_link *link, struct drm_dp_mst_port *port)
+{
+ u8 branch_vendor_data[4] = { 0 }; // Vendor data 0x50C ~ 0x50F
+
+ if (drm_dp_dpcd_read(port->mgr->aux, DP_BRANCH_VENDOR_SPECIFIC_START, &branch_vendor_data, 4) == 4) {
+ if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+ IS_SYNAPTICS_CASCADED_PANAMERA(link->dpcd_caps.branch_dev_name, branch_vendor_data)) {
+ DRM_INFO("Synaptics Cascaded MST hub\n");
+ return true;
+ }
+ }
+
+ return false;
+}
+
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
struct dc_sink *dc_sink = aconnector->dc_sink;
@@ -235,6 +246,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
needs_dsc_aux_workaround(aconnector->dc_link))
aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux;
+ /* synaptics cascaded MST hub case */
+ if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port))
+ aconnector->dsc_aux = port->mgr->aux;
+
if (!aconnector->dsc_aux)
return false;
@@ -271,7 +286,6 @@ static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnect
return true;
}
-#endif
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
@@ -362,7 +376,6 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
* plugged back with same display index, its hdcp properties
* will be retrieved from hdcp_work within dm_dp_mst_get_modes
*/
-#ifdef CONFIG_DRM_AMD_DC_HDCP
if (aconnector->dc_sink && connector->state) {
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -374,13 +387,11 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
connector->state->content_protection =
hdcp_w->content_protection[connector->index];
}
-#endif
if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(
connector, aconnector->edid);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (!validate_dsc_caps_on_connector(aconnector))
memset(&aconnector->dc_sink->dsc_caps,
0, sizeof(aconnector->dc_sink->dsc_caps));
@@ -388,7 +399,6 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
if (!retrieve_downstream_port_device(aconnector))
memset(&aconnector->mst_downstream_port_present,
0, sizeof(aconnector->mst_downstream_port_present));
-#endif
}
}
@@ -647,8 +657,6 @@ int dm_mst_get_pbn_divider(struct dc_link *link)
dc_link_get_link_cap(link)) / (8 * 1000 * 54);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-
struct dsc_mst_fairness_params {
struct dc_crtc_timing *timing;
struct dc_sink *sink;
@@ -662,12 +670,25 @@ struct dsc_mst_fairness_params {
struct amdgpu_dm_connector *aconnector;
};
-static int kbps_to_peak_pbn(int kbps)
+static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
+{
+ u8 link_coding_cap;
+ uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
+
+ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
+ if (link_coding_cap == DP_128b_132b_ENCODING)
+ fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
+
+ return fec_overhead_multiplier_x1000;
+}
+
+static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
{
u64 peak_kbps = kbps;
peak_kbps *= 1006;
- peak_kbps = div_u64(peak_kbps, 1000);
+ peak_kbps *= fec_overhead_multiplier_x1000;
+ peak_kbps = div_u64(peak_kbps, 1000 * 1000);
return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}
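A rough worked example of the updated conversion (stream rate chosen arbitrarily for illustration): the x1000 multiplier folds FEC overhead into the existing 1006/1000 adjustment before the division by 54 * 8 * 1000.

/* For a hypothetical 1,000,000 kbps stream:
 *   8b/10b    (x1000 = 1031): peak_kbps = 1,037,186 -> PBN = ceil(peak_kbps * 64 / 432000) = 154
 *   128b/132b (x1000 = 1000): peak_kbps = 1,006,000 -> PBN = 150
 */
int pbn_8b10b    = kbps_to_peak_pbn(1000000, PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B);    /* 154 */
int pbn_128b132b = kbps_to_peak_pbn(1000000, PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B); /* 150 */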
@@ -678,16 +699,19 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
{
struct drm_connector *drm_connector;
int i;
+ struct dc_dsc_config_options dsc_options = {0};
for (i = 0; i < count; i++) {
drm_connector = &params[i].aconnector->base;
+ dc_dsc_get_default_config_option(params[i].sink->ctx->dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;
+
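Brief illustration with an assumed value: the _x16 suffix denotes bits per pixel in 1/16 units, so the override above is simply the connector's maximum DSC bpp scaled by 16.

/* e.g. a connector reporting max_dsc_bpp == 12 would give: */
dsc_options.max_target_bpp_limit_override_x16 = 12 * 16;	/* 192, i.e. 12.0 bpp */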
memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
if (vars[i + k].dsc_enabled && dc_dsc_compute_config(
params[i].sink->ctx->dc->res_pool->dscs[0],
&params[i].sink->dsc_caps.dsc_dec_caps,
- params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
- drm_connector->display_info.max_dsc_bpp,
+ &dsc_options,
0,
params[i].timing,
&params[i].timing->dsc_cfg)) {
@@ -730,15 +754,16 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
u64 kbps;
struct drm_connector *drm_connector = &param.aconnector->base;
- uint32_t max_dsc_target_bpp_limit_override =
- drm_connector->display_info.max_dsc_bpp;
+ struct dc_dsc_config_options dsc_options = {0};
+
+ dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;
kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
dc_dsc_compute_config(
param.sink->ctx->dc->res_pool->dscs[0],
&param.sink->dsc_caps.dsc_dec_caps,
- param.sink->ctx->dc->debug.dsc_min_slice_height_override,
- max_dsc_target_bpp_limit_override,
+ &dsc_options,
(int) kbps, param.timing, &dsc_config);
return dsc_config.bits_per_pixel;
@@ -761,11 +786,12 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
int link_timeslots_used;
int fair_pbn_alloc;
int ret = 0;
+ uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) {
initial_slack[i] =
- kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn;
+ kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
bpp_increased[i] = false;
remaining_to_increase += 1;
} else {
@@ -861,6 +887,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
int next_index;
int remaining_to_try = 0;
int ret;
+ uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled
@@ -890,7 +917,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
if (next_index == -1)
break;
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -903,7 +930,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
vars[next_index].dsc_enabled = false;
vars[next_index].bpp_x16 = 0;
} else {
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -932,6 +959,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
int count = 0;
int i, k, ret;
bool debugfs_overwrite = false;
+ uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
memset(params, 0, sizeof(params));
@@ -993,7 +1021,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
/* Try no compression */
for (i = 0; i < count; i++) {
vars[i + k].aconnector = params[i].aconnector;
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@@ -1012,7 +1040,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
/* Try max compression */
for (i = 0; i < count; i++) {
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1020,7 +1048,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (ret < 0)
return ret;
} else {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1154,6 +1182,7 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
bool computed_streams[MAX_PIPES];
struct amdgpu_dm_connector *aconnector;
struct drm_dp_mst_topology_mgr *mst_mgr;
+ struct resource_pool *res_pool;
int link_vars_start_index = 0;
int ret = 0;
@@ -1162,6 +1191,7 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
for (i = 0; i < dc_state->stream_count; i++) {
stream = dc_state->streams[i];
+ res_pool = stream->ctx->dc->res_pool;
if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
continue;
@@ -1177,7 +1207,8 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (computed_streams[i])
continue;
- if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
+ if (!res_pool->funcs->remove_stream_from_ctx ||
+ res_pool->funcs->remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
return -EINVAL;
if (!is_dsc_need_re_compute(state, dc_state, stream->link))
@@ -1435,14 +1466,12 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16;
}
-#endif /* CONFIG_DRM_AMD_DC_DCN */
enum dc_status dm_dp_mst_is_port_support_mode(
struct amdgpu_dm_connector *aconnector,
struct dc_stream_state *stream)
{
int bpp, pbn, branch_max_throughput_mps = 0;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dc_link_settings cur_link_settings;
unsigned int end_to_end_bw_in_kbps = 0;
unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
@@ -1484,16 +1513,13 @@ enum dc_status dm_dp_mst_is_port_support_mode(
return DC_FAIL_BANDWIDTH_VALIDATE;
}
} else {
-#endif
/* check if mode could be supported within full_pbn */
bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
if (pbn > aconnector->mst_output_port->full_pbn)
return DC_FAIL_BANDWIDTH_VALIDATE;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
}
-#endif
/* check is mst dsc output bandwidth branch_overall_throughput_0_mps */
switch (stream->timing.pixel_encoding) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 97fd70df531b..1e4ede1e57ab 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -34,6 +34,21 @@
#define SYNAPTICS_RC_OFFSET 0x4BC
#define SYNAPTICS_RC_DATA 0x4C0
+#define DP_BRANCH_VENDOR_SPECIFIC_START 0x50C
+
+/*
+ * Panamera MST hub detection:
+ * DPCD offset 050Eh == 0x5A indicates the cascaded MST hub case.
+ * Checked from the start of the branch device vendor-specific field (050Ch).
+ */
+#define IS_SYNAPTICS_PANAMERA(branchDevName) (((int)branchDevName[4] & 0xF0) == 0x50 ? 1 : 0)
+#define BRANCH_HW_REVISION_PANAMERA_A2 0x10
+#define SYNAPTICS_CASCADED_HUB_ID 0x5A
+#define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 1 : 0)
+
+#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
+#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
+
struct amdgpu_display_manager;
struct amdgpu_dm_connector;
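A small worked example of the detection these macros encode (byte values invented for illustration): the four bytes read at DP_BRANCH_VENDOR_SPECIFIC_START cover DPCD 0x50C-0x50F, so data[2] corresponds to offset 0x50E.

u8 dev_name[6] = { 'S', 'Y', 'N', 'A', 0x50, 0x00 };			/* hypothetical branch_dev_name; [4] high nibble is 0x5 */
u8 vendor[4]   = { 0x00, 0x00, SYNAPTICS_CASCADED_HUB_ID, 0x00 };	/* data read from DPCD 0x50C..0x50F */

if (IS_SYNAPTICS_CASCADED_PANAMERA(dev_name, vendor))
	DRM_INFO("Synaptics Cascaded MST hub\n");	/* name check passes and vendor[2] == 0x5A */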
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 28fb1f02591a..322668973747 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -90,12 +90,12 @@ enum dm_micro_swizzle {
MICRO_SWIZZLE_R = 3
};
-const struct drm_format_info *amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
+const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
}
-void fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
+void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
bool *per_pixel_alpha, bool *pre_multiplied_alpha,
bool *global_alpha, int *global_alpha_value)
{
@@ -741,25 +741,7 @@ static int get_plane_formats(const struct drm_plane *plane,
return num_formats;
}
-#ifdef CONFIG_DRM_AMD_DC_HDR
-static int attach_color_mgmt_properties(struct amdgpu_display_manager *dm, struct drm_plane *plane)
-{
- drm_object_attach_property(&plane->base,
- dm->degamma_lut_property,
- 0);
- drm_object_attach_property(&plane->base,
- dm->degamma_lut_size_property,
- MAX_COLOR_LUT_ENTRIES);
- drm_object_attach_property(&plane->base, dm->ctm_property,
- 0);
- drm_object_attach_property(&plane->base, dm->sdr_boost_property,
- DEFAULT_SDR_BOOST);
-
- return 0;
-}
-#endif
-
-int fill_plane_buffer_attributes(struct amdgpu_device *adev,
+int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *afb,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
@@ -918,7 +900,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
dm_plane_state_new->dc_state;
bool force_disable_dcc = !plane_state->dcc.enable;
- fill_plane_buffer_attributes(
+ amdgpu_dm_plane_fill_plane_buffer_attributes(
adev, afb, plane_state->format, plane_state->rotation,
afb->tiling_flags,
&plane_state->tiling_info, &plane_state->plane_size,
@@ -999,7 +981,7 @@ static void get_min_max_dc_plane_scaling(struct drm_device *dev,
*min_downscale = 1000;
}
-int dm_plane_helper_check_state(struct drm_plane_state *state,
+int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state,
struct drm_crtc_state *new_crtc_state)
{
struct drm_framebuffer *fb = state->fb;
@@ -1053,7 +1035,7 @@ int dm_plane_helper_check_state(struct drm_plane_state *state,
state, new_crtc_state, min_scale, max_scale, true, true);
}
-int fill_dc_scaling_info(struct amdgpu_device *adev,
+int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev,
const struct drm_plane_state *state,
struct dc_scaling_info *scaling_info)
{
@@ -1161,11 +1143,11 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
if (!new_crtc_state)
return -EINVAL;
- ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
+ ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
if (ret)
return ret;
- ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
+ ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
if (ret)
return ret;
@@ -1229,7 +1211,7 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
return 0;
}
-void handle_cursor_update(struct drm_plane *plane,
+void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state)
{
struct amdgpu_device *adev = drm_to_adev(plane->dev);
@@ -1314,7 +1296,7 @@ static void dm_plane_atomic_async_update(struct drm_plane *plane,
plane->state->crtc_w = new_state->crtc_w;
plane->state->crtc_h = new_state->crtc_h;
- handle_cursor_update(plane, old_state);
+ amdgpu_dm_plane_handle_cursor_update(plane, old_state);
}
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
@@ -1337,10 +1319,6 @@ static void dm_drm_plane_reset(struct drm_plane *plane)
if (amdgpu_state)
__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
-#ifdef CONFIG_DRM_AMD_DC_HDR
- if (amdgpu_state)
- amdgpu_state->sdr_boost = DEFAULT_SDR_BOOST;
-#endif
}
static struct drm_plane_state *
@@ -1360,15 +1338,6 @@ dm_drm_plane_duplicate_state(struct drm_plane *plane)
dc_plane_state_retain(dm_plane_state->dc_state);
}
-#ifdef CONFIG_DRM_AMD_DC_HDR
- if (dm_plane_state->degamma_lut)
- drm_property_blob_get(dm_plane_state->degamma_lut);
- if (dm_plane_state->ctm)
- drm_property_blob_get(dm_plane_state->ctm);
-
- dm_plane_state->sdr_boost = old_dm_plane_state->sdr_boost;
-#endif
-
return &dm_plane_state->base;
}
@@ -1436,103 +1405,12 @@ static void dm_drm_plane_destroy_state(struct drm_plane *plane,
{
struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
-#ifdef CONFIG_DRM_AMD_DC_HDR
- drm_property_blob_put(dm_plane_state->degamma_lut);
- drm_property_blob_put(dm_plane_state->ctm);
-#endif
if (dm_plane_state->dc_state)
dc_plane_state_release(dm_plane_state->dc_state);
drm_atomic_helper_plane_destroy_state(plane, state);
}
-#ifdef CONFIG_DRM_AMD_DC_HDR
-/* copied from drm_atomic_uapi.c */
-static int atomic_replace_property_blob_from_id(struct drm_device *dev,
- struct drm_property_blob **blob,
- uint64_t blob_id,
- ssize_t expected_size,
- ssize_t expected_elem_size,
- bool *replaced)
-{
- struct drm_property_blob *new_blob = NULL;
-
- if (blob_id != 0) {
- new_blob = drm_property_lookup_blob(dev, blob_id);
- if (new_blob == NULL)
- return -EINVAL;
-
- if (expected_size > 0 &&
- new_blob->length != expected_size) {
- drm_property_blob_put(new_blob);
- return -EINVAL;
- }
- if (expected_elem_size > 0 &&
- new_blob->length % expected_elem_size != 0) {
- drm_property_blob_put(new_blob);
- return -EINVAL;
- }
- }
-
- *replaced |= drm_property_replace_blob(blob, new_blob);
- drm_property_blob_put(new_blob);
-
- return 0;
-}
-
-int dm_drm_plane_set_property(struct drm_plane *plane,
- struct drm_plane_state *state,
- struct drm_property *property,
- uint64_t val)
-{
- struct amdgpu_device *adev = drm_to_adev(plane->dev);
- struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
- int ret = 0;
- bool replaced;
-
- if (property == adev->dm.degamma_lut_property) {
- ret = atomic_replace_property_blob_from_id(adev_to_drm(adev),
- &dm_plane_state->degamma_lut,
- val, -1, sizeof(struct drm_color_lut),
- &replaced);
- } else if (property == adev->dm.ctm_property) {
- ret = atomic_replace_property_blob_from_id(adev_to_drm(adev),
- &dm_plane_state->ctm,
- val,
- sizeof(struct drm_color_ctm), -1,
- &replaced);
- } else if (property == adev->dm.sdr_boost_property) {
- dm_plane_state->sdr_boost = val;
- } else {
- return -EINVAL;
- }
-
- return ret;
-}
-
-int dm_drm_plane_get_property(struct drm_plane *plane,
- const struct drm_plane_state *state,
- struct drm_property *property,
- uint64_t *val)
-{
- struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
- struct amdgpu_device *adev = drm_to_adev(plane->dev);
-
- if (property == adev->dm.degamma_lut_property) {
- *val = (dm_plane_state->degamma_lut) ?
- dm_plane_state->degamma_lut->base.id : 0;
- } else if (property == adev->dm.ctm_property) {
- *val = (dm_plane_state->ctm) ? dm_plane_state->ctm->base.id : 0;
- } else if (property == adev->dm.sdr_boost_property) {
- *val = dm_plane_state->sdr_boost;
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
-#endif
-
static const struct drm_plane_funcs dm_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -1541,10 +1419,6 @@ static const struct drm_plane_funcs dm_plane_funcs = {
.atomic_duplicate_state = dm_drm_plane_duplicate_state,
.atomic_destroy_state = dm_drm_plane_destroy_state,
.format_mod_supported = dm_plane_format_mod_supported,
-#ifdef CONFIG_DRM_AMD_DC_HDR
- .atomic_set_property = dm_drm_plane_set_property,
- .atomic_get_property = dm_drm_plane_get_property,
-#endif
};
int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
@@ -1615,9 +1489,6 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
drm_plane_helper_add(plane, &dm_plane_helper_funcs);
-#ifdef CONFIG_DRM_AMD_DC_HDR
- attach_color_mgmt_properties(dm, plane);
-#endif
/* Create (reset) the plane state */
if (plane->funcs->reset)
plane->funcs->reset(plane);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
index a4bee8528a51..930f1572f898 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
@@ -29,17 +29,17 @@
#include "dc.h"
-void handle_cursor_update(struct drm_plane *plane,
+void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state);
-int fill_dc_scaling_info(struct amdgpu_device *adev,
+int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev,
const struct drm_plane_state *state,
struct dc_scaling_info *scaling_info);
-int dm_plane_helper_check_state(struct drm_plane_state *state,
+int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state,
struct drm_crtc_state *new_crtc_state);
-int fill_plane_buffer_attributes(struct amdgpu_device *adev,
+int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *afb,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
@@ -56,9 +56,9 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
unsigned long possible_crtcs,
const struct dc_plane_cap *plane_cap);
-const struct drm_format_info *amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
+const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
-void fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
+void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
bool *per_pixel_alpha, bool *pre_multiplied_alpha,
bool *global_alpha, int *global_alpha_value);
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 94f156d57220..69ffd4424dc7 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -22,14 +22,13 @@
#
# Makefile for Display Core (dc) component.
-DC_LIBS = basics bios dml clk_mgr dce gpio irq link virtual
+DC_LIBS = basics bios dml clk_mgr dce gpio irq link virtual dsc
-ifdef CONFIG_DRM_AMD_DC_DCN
+ifdef CONFIG_DRM_AMD_DC_FP
KCOV_INSTRUMENT := n
DC_LIBS += dcn20
-DC_LIBS += dsc
DC_LIBS += dcn10
DC_LIBS += dcn21
DC_LIBS += dcn201
@@ -56,9 +55,7 @@ ifdef CONFIG_DRM_AMD_DC_SI
DC_LIBS += dce60
endif
-ifdef CONFIG_DRM_AMD_DC_HDCP
DC_LIBS += hdcp
-endif
AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS)))
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index e381de2429fa..f0f948501e9a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -2064,7 +2064,7 @@ static enum bp_result bios_parser_get_encoder_cap_info(
if (!info)
return BP_RESULT_BADINPUT;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
/* encoder cap record not available in v1_5 */
if (bp->object_info_tbl.revision.minor == 5)
return BP_RESULT_NORECORD;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index 271d8e573181..ad390e4cd0a9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -74,7 +74,7 @@ CLK_MGR_DCE120 = dce120_clk_mgr.o
AMD_DAL_CLK_MGR_DCE120 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dce120/,$(CLK_MGR_DCE120))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCE120)
-ifdef CONFIG_DRM_AMD_DC_DCN
+ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
# DCN10
###############################################################################
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 69691daf4dbb..6127d6045336 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -104,7 +104,7 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m
int edp_num;
unsigned int panel_inst;
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (dc->hwss.exit_optimized_pwr_state)
dc->hwss.exit_optimized_pwr_state(dc, dc->current_state);
@@ -116,7 +116,7 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m
if (!edp_link->psr_settings.psr_feature_enabled)
continue;
clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active;
- dc_link_set_psr_allow_active(edp_link, &allow_active, false, false, NULL);
+ dc->link_srv->edp_set_psr_allow_active(edp_link, &allow_active, false, false, NULL);
}
}
@@ -129,13 +129,13 @@ void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
int edp_num;
unsigned int panel_inst;
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num) {
for (panel_inst = 0; panel_inst < edp_num; panel_inst++) {
edp_link = edp_links[panel_inst];
if (!edp_link->psr_settings.psr_feature_enabled)
continue;
- dc_link_set_psr_allow_active(edp_link,
+ dc->link_srv->edp_set_psr_allow_active(edp_link,
&clk_mgr->psr_allow_active_cache, false, false, NULL);
}
}
@@ -221,7 +221,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
dce120_clk_mgr_construct(ctx, clk_mgr);
return &clk_mgr->base;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
case FAMILY_RV: {
struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
@@ -351,7 +351,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
}
break;
-#endif
+#endif /* CONFIG_DRM_AMD_DC_FP - Family RV */
default:
ASSERT(0); /* Unknown Asic */
break;
@@ -364,7 +364,7 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
-#ifdef CONFIG_DRM_AMD_DC_DCN
+#ifdef CONFIG_DRM_AMD_DC_FP
switch (clk_mgr_base->ctx->asic_id.chip_family) {
case FAMILY_NV:
if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
@@ -405,7 +405,7 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
default:
break;
}
-#endif
+#endif /* CONFIG_DRM_AMD_DC_FP */
kfree(clk_mgr);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
index f0577dcd1af6..811720749faf 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
@@ -162,7 +162,7 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
}
}
-struct clk_mgr_funcs dcn201_funcs = {
+static struct clk_mgr_funcs dcn201_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = dcn201_update_clocks,
.init_clocks = dcn201_init_clocks,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index ca6dfd2d7561..bd9fd0b54f46 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -706,7 +706,7 @@ void rn_clk_mgr_construct(
enum pp_smu_status status = 0;
int is_green_sardine = 0;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 24715ca2fa94..01383aac6b41 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -529,6 +529,19 @@ static struct clk_bw_params vg_bw_params = {
};
+static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
+{
+ uint32_t max = 0;
+ int i;
+
+ for (i = 0; i < num_clocks; ++i) {
+ if (clocks[i] > max)
+ max = clocks[i];
+ }
+
+ return max;
+}
+
static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_table,
unsigned int voltage)
{
@@ -572,12 +585,16 @@ static void vg_clk_mgr_helper_populate_bw_params(
bw_params->clk_table.num_entries = j + 1;
- for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
+ for (i = 0; i < bw_params->clk_table.num_entries - 1; i++, j--) {
bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->DfPstateTable[j].voltage);
}
+ bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
+ bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, VG_NUM_DCFCLK_DPM_LEVELS);
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
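Illustrative usage of the new helper with invented table values: find_max_clk_value() is a plain linear maximum scan, and its result seeds the DCFCLK of the final bandwidth table entry above.

const uint32_t example_dcfclks[VG_NUM_DCFCLK_DPM_LEVELS] = { 400, 600, 1200, 800 };
uint32_t max_dcfclk_mhz = find_max_clk_value(example_dcfclks, VG_NUM_DCFCLK_DPM_LEVELS);	/* 1200 */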
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 89df7244b272..5cb44f838bde 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -108,6 +108,11 @@ static int dcn314_get_active_display_cnt_wa(
stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
tmds_present = true;
+
+ /* Check stream / link detection to ensure that the PHY is active */
+ if (dc_is_dp_signal(stream->signal) && !stream->dpms_off)
+ display_count++;
+
}
for (i = 0; i < dc->link_count; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 61768bf726f8..af108f88b112 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -255,27 +255,60 @@ static void dcn32_update_dppclk_dispclk_freq(struct clk_mgr_internal *clk_mgr, s
}
}
+void dcn32_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
+ struct dc_state *context, bool safe_to_lower)
+{
+ int i;
+
+ clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
+ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+ int dpp_inst, dppclk_khz, prev_dppclk_khz;
+
+ dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+
+ if (context->res_ctx.pipe_ctx[i].plane_res.dpp)
+ dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
+ else if (!context->res_ctx.pipe_ctx[i].plane_res.dpp && dppclk_khz == 0) {
+ /* dpp == NULL && dppclk_khz == 0 is valid because of pipe harvesting.
+ * In that case just continue in the loop.
+ */
+ continue;
+ } else if (!context->res_ctx.pipe_ctx[i].plane_res.dpp && dppclk_khz > 0) {
+ /* The software state is not valid if dpp resource is NULL and
+ * dppclk_khz > 0.
+ */
+ ASSERT(false);
+ continue;
+ }
+
+ prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i];
+
+ if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
+ clk_mgr->dccg->funcs->update_dpp_dto(
+ clk_mgr->dccg, dpp_inst, dppclk_khz);
+ }
+}
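A condensed restatement of the update condition in the loop above, as a sketch (the helper name is made up): the DTO is raised immediately but only lowered once the caller says it is safe.

static bool example_should_update_dpp_dto(bool safe_to_lower, int prev_khz, int new_khz)
{
	/* mirrors "safe_to_lower || prev_dppclk_khz < dppclk_khz" above */
	return safe_to_lower || prev_khz < new_khz;
}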
+
static void dcn32_update_clocks_update_dentist(
struct clk_mgr_internal *clk_mgr,
- struct dc_state *context,
- uint32_t old_dispclk_khz)
+ struct dc_state *context)
{
uint32_t new_disp_divider = 0;
- uint32_t old_disp_divider = 0;
uint32_t new_dispclk_wdivider = 0;
uint32_t old_dispclk_wdivider = 0;
uint32_t i;
+ uint32_t dentist_dispclk_wdivider_readback = 0;
+ struct dc *dc = clk_mgr->base.ctx->dc;
- if (old_dispclk_khz == 0 || clk_mgr->base.clks.dispclk_khz == 0)
+ if (clk_mgr->base.clks.dispclk_khz == 0)
return;
new_disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
* clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz;
- old_disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->base.dentist_vco_freq_khz / old_dispclk_khz;
new_dispclk_wdivider = dentist_get_did_from_divider(new_disp_divider);
- old_dispclk_wdivider = dentist_get_did_from_divider(old_disp_divider);
+ REG_GET(DENTIST_DISPCLK_CNTL,
+ DENTIST_DISPCLK_WDIVIDER, &old_dispclk_wdivider);
/* When changing divider to or from 127, some extra programming is required to prevent corruption */
if (old_dispclk_wdivider == 127 && new_dispclk_wdivider != 127) {
@@ -314,6 +347,17 @@ static void dcn32_update_clocks_update_dentist(
if (clk_mgr->smu_present)
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(temp_dispclk_khz));
+ if (dc->debug.override_dispclk_programming) {
+ REG_GET(DENTIST_DISPCLK_CNTL,
+ DENTIST_DISPCLK_WDIVIDER, &dentist_dispclk_wdivider_readback);
+
+ if (dentist_dispclk_wdivider_readback != 126) {
+ REG_UPDATE(DENTIST_DISPCLK_CNTL,
+ DENTIST_DISPCLK_WDIVIDER, 126);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
+ }
+ }
+
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg;
@@ -341,6 +385,18 @@ static void dcn32_update_clocks_update_dentist(
/* do requested DISPCLK updates*/
if (clk_mgr->smu_present)
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr->base.clks.dispclk_khz));
+
+ if (dc->debug.override_dispclk_programming) {
+ REG_GET(DENTIST_DISPCLK_CNTL,
+ DENTIST_DISPCLK_WDIVIDER, &dentist_dispclk_wdivider_readback);
+
+ if (dentist_dispclk_wdivider_readback > new_dispclk_wdivider) {
+ REG_UPDATE(DENTIST_DISPCLK_CNTL,
+ DENTIST_DISPCLK_WDIVIDER, new_dispclk_wdivider);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
+ }
+ }
+
}
static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
@@ -361,7 +417,6 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
bool p_state_change_support;
bool fclk_p_state_change_support;
int total_plane_count;
- int old_dispclk_khz = clk_mgr_base->clks.dispclk_khz;
if (dc->work_arounds.skip_clock_update)
return;
@@ -503,19 +558,19 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
if (dpp_clock_lowered) {
/* if clock is being lowered, increase DTO before lowering refclk */
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
- dcn32_update_clocks_update_dentist(clk_mgr, context, old_dispclk_khz);
+ dcn32_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ dcn32_update_clocks_update_dentist(clk_mgr, context);
if (clk_mgr->smu_present)
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz));
} else {
/* if clock is being raised, increase refclk before lowering DTO */
if (update_dppclk || update_dispclk)
- dcn32_update_clocks_update_dentist(clk_mgr, context, old_dispclk_khz);
+ dcn32_update_clocks_update_dentist(clk_mgr, context);
/* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures
* that we do not lower dto when it is not safe to lower. We do not need to
* compare the current and new dppclk before calling this function.
*/
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ dcn32_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.h
index 57e09c7c95f5..186daada7b03 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.h
@@ -32,6 +32,9 @@ void dcn32_clk_mgr_construct(struct dc_context *ctx,
struct pp_smu_funcs *pp_smu,
struct dccg *dccg);
+void dcn32_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
+ struct dc_state *context, bool safe_to_lower);
+
void dcn32_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 1c218c526650..ae5f1b7b4fef 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -53,7 +53,6 @@
#include "link_encoder.h"
#include "link_enc_cfg.h"
-#include "dc_link.h"
#include "link.h"
#include "dm_helpers.h"
#include "mem_input.h"
@@ -74,6 +73,8 @@
#include "dc_trace.h"
+#include "hw_sequencer_private.h"
+
#include "dce/dmub_outbox.h"
#define CTX \
@@ -147,7 +148,7 @@ static void destroy_links(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
if (NULL != dc->links[i])
- link_destroy(&dc->links[i]);
+ dc->link_srv->destroy_link(&dc->links[i]);
}
}
@@ -216,7 +217,7 @@ static bool create_links(
link_init_params.connector_index = i;
link_init_params.link_index = dc->link_count;
link_init_params.dc = dc;
- link = link_create(&link_init_params);
+ link = dc->link_srv->create_link(&link_init_params);
if (link) {
dc->links[dc->link_count] = link;
@@ -238,7 +239,7 @@ static bool create_links(
link_init_params.dc = dc;
link_init_params.is_dpia_link = true;
- link = link_create(&link_init_params);
+ link = dc->link_srv->create_link(&link_init_params);
if (link) {
dc->links[dc->link_count] = link;
link->dc = dc;
@@ -399,6 +400,14 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
{
int i;
+ /*
+ * Don't adjust DRR while there's bandwidth optimizations pending to
+ * avoid conflicting with firmware updates.
+ */
+ if (dc->ctx->dce_version > DCE_VERSION_MAX)
+ if (dc->optimized_required || dc->wm_optimized_required)
+ return false;
+
stream->adjust.v_total_max = adjust->v_total_max;
stream->adjust.v_total_mid = adjust->v_total_mid;
stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
@@ -814,6 +823,9 @@ static void dc_destruct(struct dc *dc)
dc_destroy_resource_pool(dc);
+ if (dc->link_srv)
+ link_destroy_link_service(&dc->link_srv);
+
if (dc->ctx->gpio_service)
dal_gpio_service_destroy(&dc->ctx->gpio_service);
@@ -973,6 +985,8 @@ static bool dc_construct(struct dc *dc,
goto fail;
}
+ dc->link_srv = link_create_link_service();
+
dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
if (!dc->res_pool)
goto fail;
@@ -984,7 +998,7 @@ static bool dc_construct(struct dc *dc,
dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
if (!dc->clk_mgr)
goto fail;
-#ifdef CONFIG_DRM_AMD_DC_DCN
+#ifdef CONFIG_DRM_AMD_DC_FP
dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
if (dc->res_pool->funcs->update_bw_bounding_box) {
@@ -1057,6 +1071,53 @@ static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *contex
}
}
+static void phantom_pipe_blank(
+ struct dc *dc,
+ struct timing_generator *tg,
+ int width,
+ int height)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ enum dc_color_space color_space;
+ struct tg_color black_color = {0};
+ struct output_pixel_processor *opp = NULL;
+ uint32_t num_opps, opp_id_src0, opp_id_src1;
+ uint32_t otg_active_width, otg_active_height;
+ uint32_t i;
+
+ /* program opp dpg blank color */
+ color_space = COLOR_SPACE_SRGB;
+ color_space_to_black_color(dc, color_space, &black_color);
+
+ otg_active_width = width;
+ otg_active_height = height;
+
+ /* get the OPTC source */
+ tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
+ ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
+
+ for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
+ if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
+ opp = dc->res_pool->opps[i];
+ break;
+ }
+ }
+
+ if (opp && opp->funcs->opp_set_disp_pattern_generator)
+ opp->funcs->opp_set_disp_pattern_generator(
+ opp,
+ CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+ COLOR_DEPTH_UNDEFINED,
+ &black_color,
+ otg_active_width,
+ otg_active_height,
+ 0);
+
+ if (tg->funcs->is_tg_enabled(tg))
+ hws->funcs.wait_for_blank_complete(opp);
+}
+
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
@@ -1115,8 +1176,14 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
* again for different use.
*/
if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
- if (tg->funcs->enable_crtc)
+ if (tg->funcs->enable_crtc) {
+ int main_pipe_width, main_pipe_height;
+
+ main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width;
+ main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height;
+ phantom_pipe_blank(dc, tg, main_pipe_width, main_pipe_height);
tg->funcs->enable_crtc(tg);
+ }
}
dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
@@ -1199,7 +1266,7 @@ static void disable_vbios_mode_if_required(
pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
if (pix_clk_100hz != requested_pix_clk_100hz) {
- link_set_dpms_off(pipe);
+ dc->link_srv->set_dpms_off(pipe);
pipe->stream->dpms_off = false;
}
}
@@ -1298,7 +1365,7 @@ static void detect_edp_presence(struct dc *dc)
int i;
int edp_num;
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (!edp_num)
return;
@@ -1324,16 +1391,12 @@ void dc_hardware_init(struct dc *dc)
void dc_init_callbacks(struct dc *dc,
const struct dc_callback_init *init_params)
{
-#ifdef CONFIG_DRM_AMD_DC_HDCP
dc->ctx->cp_psp = init_params->cp_psp;
-#endif
}
void dc_deinit_callbacks(struct dc *dc)
{
-#ifdef CONFIG_DRM_AMD_DC_HDCP
memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
-#endif
}
void dc_destroy(struct dc **dc)
@@ -1658,7 +1721,7 @@ bool dc_validate_boot_timing(const struct dc *dc,
return false;
}
- if (link_is_edp_ilr_optimization_required(link, crtc_timing)) {
+ if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
return false;
}
@@ -2001,53 +2064,6 @@ context_alloc_fail:
return res;
}
-/* TODO: When the transition to the new commit sequence is done, remove this
- * function in favor of dc_commit_streams. */
-bool dc_commit_state(struct dc *dc, struct dc_state *context)
-{
- enum dc_status result = DC_ERROR_UNEXPECTED;
- int i;
-
- /* TODO: Since change commit sequence can have a huge impact,
- * we decided to only enable it for DCN3x. However, as soon as
- * we get more confident about this change we'll need to enable
- * the new sequence for all ASICs. */
- if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
- result = dc_commit_streams(dc, context->streams, context->stream_count);
- return result == DC_OK;
- }
-
- if (!streams_changed(dc, context->streams, context->stream_count)) {
- return DC_OK;
- }
-
- DC_LOG_DC("%s: %d streams\n",
- __func__, context->stream_count);
-
- for (i = 0; i < context->stream_count; i++) {
- struct dc_stream_state *stream = context->streams[i];
-
- dc_stream_log(dc, stream);
- }
-
- /*
- * Previous validation was perfomred with fast_validation = true and
- * the full DML state required for hardware programming was skipped.
- *
- * Re-validate here to calculate these parameters / watermarks.
- */
- result = dc_validate_global_state(dc, context, false);
- if (result != DC_OK) {
- DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
- dc_status_to_str(result), result);
- return result;
- }
-
- result = dc_commit_state_no_check(dc, context);
-
- return (result == DC_OK);
-}
-
bool dc_acquire_release_mpc_3dlut(
struct dc *dc, bool acquire,
struct dc_stream_state *stream,
@@ -2134,27 +2150,33 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
post_surface_trace(dc);
- if (dc->ctx->dce_version >= DCE_VERSION_MAX)
- TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
- else
+ /*
+ * Only relevant for DCN behavior where we can guarantee the optimization
+ * is safe to apply - retain the legacy behavior for DCE.
+ */
+
+ if (dc->ctx->dce_version < DCE_VERSION_MAX)
TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+ else {
+ TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
- if (is_flip_pending_in_pipes(dc, context))
- return;
+ if (is_flip_pending_in_pipes(dc, context))
+ return;
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (context->res_ctx.pipe_ctx[i].stream == NULL ||
- context->res_ctx.pipe_ctx[i].plane_state == NULL) {
- context->res_ctx.pipe_ctx[i].pipe_idx = i;
- dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
- }
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].stream == NULL ||
+ context->res_ctx.pipe_ctx[i].plane_state == NULL) {
+ context->res_ctx.pipe_ctx[i].pipe_idx = i;
+ dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
+ }
- process_deferred_updates(dc);
+ process_deferred_updates(dc);
- dc->hwss.optimize_bandwidth(dc, context);
+ dc->hwss.optimize_bandwidth(dc, context);
- if (dc->debug.enable_double_buffered_dsc_pg_support)
- dc->hwss.update_dsc_pg(dc, context, true);
+ if (dc->debug.enable_double_buffered_dsc_pg_support)
+ dc->hwss.update_dsc_pg(dc, context, true);
+ }
dc->optimized_required = false;
dc->wm_optimized_required = false;
@@ -3173,7 +3195,9 @@ static void commit_planes_do_stream_update(struct dc *dc,
dc->hwss.update_info_frame(pipe_ctx);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- link_dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+ dc->link_srv->dp_trace_source_sequence(
+ pipe_ctx->stream->link,
+ DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
}
if (stream_update->hdr_static_metadata &&
@@ -3209,13 +3233,15 @@ static void commit_planes_do_stream_update(struct dc *dc,
continue;
if (stream_update->dsc_config)
- link_update_dsc_config(pipe_ctx);
+ dc->link_srv->update_dsc_config(pipe_ctx);
if (stream_update->mst_bw_update) {
if (stream_update->mst_bw_update->is_increase)
- link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
+ dc->link_srv->increase_mst_payload(pipe_ctx,
+ stream_update->mst_bw_update->mst_stream_bw);
else
- link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
+ dc->link_srv->reduce_mst_payload(pipe_ctx,
+ stream_update->mst_bw_update->mst_stream_bw);
}
if (stream_update->pending_test_pattern) {
@@ -3229,7 +3255,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (stream_update->dpms_off) {
if (*stream_update->dpms_off) {
- link_set_dpms_off(pipe_ctx);
+ dc->link_srv->set_dpms_off(pipe_ctx);
/* for dpms, keep acquired resources*/
if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
@@ -3239,7 +3265,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
} else {
if (get_seamless_boot_stream_count(context) == 0)
dc->hwss.prepare_bandwidth(dc, dc->current_state);
- link_set_dpms_on(dc->current_state, pipe_ctx);
+ dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
}
}
@@ -3510,14 +3536,9 @@ static void commit_planes_for_stream(struct dc *dc,
/* Since phantom pipe programming is moved to post_unlock_program_front_end,
* move the SubVP lock to after the phantom pipes have been setup
*/
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- } else {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- }
-
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
+ NULL, subvp_prev_use);
return;
}
@@ -4083,24 +4104,30 @@ void dc_commit_updates_for_stream(struct dc *dc,
struct dc_context *dc_ctx = dc->ctx;
int i, j;
+ stream_status = dc_stream_get_status(stream);
+ context = dc->current_state;
+
+ update_type = dc_check_update_surfaces_for_stream(
+ dc, srf_updates, surface_count, stream_update, stream_status);
+
/* TODO: Since change commit sequence can have a huge impact,
* we decided to only enable it for DCN3x. However, as soon as
* we get more confident about this change we'll need to enable
* the new sequence for all ASICs.
*/
if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
+ /*
+ * Previous frame finished and HW is ready for optimization.
+ */
+ if (update_type == UPDATE_TYPE_FAST)
+ dc_post_update_surfaces_to_stream(dc);
+
dc_update_planes_and_stream(dc, srf_updates,
surface_count, stream,
stream_update);
return;
}
- stream_status = dc_stream_get_status(stream);
- context = dc->current_state;
-
- update_type = dc_check_update_surfaces_for_stream(
- dc, srf_updates, surface_count, stream_update, stream_status);
-
if (update_type >= update_surface_trace_level)
update_surface_trace(dc, srf_updates, surface_count);
@@ -4123,12 +4150,9 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
new_pipe->plane_state->force_full_update = true;
}
- } else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
+ } else if (update_type == UPDATE_TYPE_FAST) {
/*
* Previous frame finished and HW is ready for optimization.
- *
- * Only relevant for DCN behavior where we can guarantee the optimization
- * is safe to apply - retain the legacy behavior for DCE.
*/
dc_post_update_surfaces_to_stream(dc);
}
@@ -4305,7 +4329,7 @@ void dc_resume(struct dc *dc)
uint32_t i;
for (i = 0; i < dc->link_count; i++)
- link_resume(dc->links[i]);
+ dc->link_srv->resume(dc->links[i]);
}
bool dc_is_dmcu_initialized(struct dc *dc)
@@ -4317,157 +4341,6 @@ bool dc_is_dmcu_initialized(struct dc *dc)
return false;
}
-bool dc_is_oem_i2c_device_present(
- struct dc *dc,
- size_t slave_address)
-{
- if (dc->res_pool->oem_device)
- return dce_i2c_oem_device_present(
- dc->res_pool,
- dc->res_pool->oem_device,
- slave_address);
-
- return false;
-}
-
-bool dc_submit_i2c(
- struct dc *dc,
- uint32_t link_index,
- struct i2c_command *cmd)
-{
-
- struct dc_link *link = dc->links[link_index];
- struct ddc_service *ddc = link->ddc;
- return dce_i2c_submit_command(
- dc->res_pool,
- ddc->ddc_pin,
- cmd);
-}
-
-bool dc_submit_i2c_oem(
- struct dc *dc,
- struct i2c_command *cmd)
-{
- struct ddc_service *ddc = dc->res_pool->oem_device;
- if (ddc)
- return dce_i2c_submit_command(
- dc->res_pool,
- ddc->ddc_pin,
- cmd);
-
- return false;
-}
-
-static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
-{
- if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- dc_sink_retain(sink);
-
- dc_link->remote_sinks[dc_link->sink_count] = sink;
- dc_link->sink_count++;
-
- return true;
-}
-
-/*
- * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
- *
- * EDID length is in bytes
- */
-struct dc_sink *dc_link_add_remote_sink(
- struct dc_link *link,
- const uint8_t *edid,
- int len,
- struct dc_sink_init_data *init_data)
-{
- struct dc_sink *dc_sink;
- enum dc_edid_status edid_status;
-
- if (len > DC_MAX_EDID_BUFFER_SIZE) {
- dm_error("Max EDID buffer size breached!\n");
- return NULL;
- }
-
- if (!init_data) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-
- if (!init_data->link) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-
- dc_sink = dc_sink_create(init_data);
-
- if (!dc_sink)
- return NULL;
-
- memmove(dc_sink->dc_edid.raw_edid, edid, len);
- dc_sink->dc_edid.length = len;
-
- if (!link_add_remote_sink_helper(
- link,
- dc_sink))
- goto fail_add_sink;
-
- edid_status = dm_helpers_parse_edid_caps(
- link,
- &dc_sink->dc_edid,
- &dc_sink->edid_caps);
-
- /*
- * Treat device as no EDID device if EDID
- * parsing fails
- */
- if (edid_status != EDID_OK && edid_status != EDID_PARTIAL_VALID) {
- dc_sink->dc_edid.length = 0;
- dm_error("Bad EDID, status%d!\n", edid_status);
- }
-
- return dc_sink;
-
-fail_add_sink:
- dc_sink_release(dc_sink);
- return NULL;
-}
-
-/*
- * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
- *
- * Note that this just removes the struct dc_sink - it doesn't
- * program hardware or alter other members of dc_link
- */
-void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
-{
- int i;
-
- if (!link->sink_count) {
- BREAK_TO_DEBUGGER();
- return;
- }
-
- for (i = 0; i < link->sink_count; i++) {
- if (link->remote_sinks[i] == sink) {
- dc_sink_release(sink);
- link->remote_sinks[i] = NULL;
-
- /* shrink array to remove empty place */
- while (i < link->sink_count - 1) {
- link->remote_sinks[i] = link->remote_sinks[i+1];
- i++;
- }
- link->remote_sinks[i] = NULL;
- link->sink_count--;
- return;
- }
- }
-}
-
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
@@ -4990,7 +4863,7 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
return;
}
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
/* Determine panel inst */
for (i = 0; i < edp_num; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 652270a0b498..2acbf692193f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -73,28 +73,38 @@ struct out_csc_color_matrix_type {
static const struct out_csc_color_matrix_type output_csc_matrix[] = {
{ COLOR_SPACE_RGB_TYPE,
- { 0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ { 0x2000, 0, 0, 0,
+ 0, 0x2000, 0, 0,
+ 0, 0, 0x2000, 0} },
{ COLOR_SPACE_RGB_LIMITED_TYPE,
- { 0x1B67, 0, 0, 0x201, 0, 0x1B67, 0, 0x201, 0, 0, 0x1B67, 0x201} },
+ { 0x1B67, 0, 0, 0x201,
+ 0, 0x1B67, 0, 0x201,
+ 0, 0, 0x1B67, 0x201} },
{ COLOR_SPACE_YCBCR601_TYPE,
- { 0xE04, 0xF444, 0xFDB9, 0x1004, 0x831, 0x1016, 0x320, 0x201, 0xFB45,
- 0xF6B7, 0xE04, 0x1004} },
+ { 0xE04, 0xF444, 0xFDB9, 0x1004,
+ 0x831, 0x1016, 0x320, 0x201,
+ 0xFB45, 0xF6B7, 0xE04, 0x1004} },
{ COLOR_SPACE_YCBCR709_TYPE,
- { 0xE04, 0xF345, 0xFEB7, 0x1004, 0x5D3, 0x1399, 0x1FA,
- 0x201, 0xFCCA, 0xF533, 0xE04, 0x1004} },
+ { 0xE04, 0xF345, 0xFEB7, 0x1004,
+ 0x5D3, 0x1399, 0x1FA, 0x201,
+ 0xFCCA, 0xF533, 0xE04, 0x1004} },
/* TODO: correct values below */
{ COLOR_SPACE_YCBCR601_LIMITED_TYPE,
- { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991,
- 0x12C9, 0x3A6, 0x200, 0xFB47, 0xF6B9, 0xE00, 0x1000} },
+ { 0xE00, 0xF447, 0xFDB9, 0x1000,
+ 0x991, 0x12C9, 0x3A6, 0x200,
+ 0xFB47, 0xF6B9, 0xE00, 0x1000} },
{ COLOR_SPACE_YCBCR709_LIMITED_TYPE,
- { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
- 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
+ { 0xE00, 0xF349, 0xFEB7, 0x1000,
+ 0x6CE, 0x16E3, 0x24F, 0x200,
+ 0xFCCB, 0xF535, 0xE00, 0x1000} },
{ COLOR_SPACE_YCBCR2020_TYPE,
- { 0x1000, 0xF149, 0xFEB7, 0x1004, 0x0868, 0x15B2,
- 0x01E6, 0x201, 0xFB88, 0xF478, 0x1000, 0x1004} },
+ { 0x1000, 0xF149, 0xFEB7, 0x1004,
+ 0x0868, 0x15B2, 0x01E6, 0x201,
+ 0xFB88, 0xF478, 0x1000, 0x1004} },
{ COLOR_SPACE_YCBCR709_BLACK_TYPE,
- { 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000,
- 0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x1000} },
+ { 0x0000, 0x0000, 0x0000, 0x1000,
+ 0x0000, 0x0000, 0x0000, 0x0200,
+ 0x0000, 0x0000, 0x0000, 0x1000} },
};
static bool is_rgb_type(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
index 74e465ba158d..41198c729d90 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
@@ -48,7 +48,7 @@ static bool is_dig_link_enc_stream(struct dc_stream_state *stream)
/* DIGs do not support DP2.0 streams with 128b/132b encoding. */
struct dc_link_settings link_settings = {0};
- link_decide_link_settings(stream, &link_settings);
+ stream->ctx->dc->link_srv->dp_decide_link_settings(stream, &link_settings);
if ((link_settings.link_rate >= LINK_RATE_LOW) &&
link_settings.link_rate <= LINK_RATE_HIGH3) {
is_dig_stream = true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index a951e10416ee..58fa911b1417 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -34,70 +34,443 @@
* in this file which calls link functions.
*/
#include "link.h"
+#include "dce/dce_i2c.h"
+struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index)
+{
+ return dc->links[link_index];
+}
+
+void dc_get_edp_links(const struct dc *dc,
+ struct dc_link **edp_links,
+ int *edp_num)
+{
+ int i;
+
+ *edp_num = 0;
+ for (i = 0; i < dc->link_count; i++) {
+ // report any eDP links, even unconnected DDIs
+ if (!dc->links[i])
+ continue;
+ if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) {
+ edp_links[*edp_num] = dc->links[i];
+ if (++(*edp_num) == MAX_NUM_EDP)
+ return;
+ }
+ }
+}
+
+bool dc_get_edp_link_panel_inst(const struct dc *dc,
+ const struct dc_link *link,
+ unsigned int *inst_out)
+{
+ struct dc_link *edp_links[MAX_NUM_EDP];
+ int edp_num, i;
+
+ *inst_out = 0;
+ if (link->connector_signal != SIGNAL_TYPE_EDP)
+ return false;
+ dc_get_edp_links(dc, edp_links, &edp_num);
+ for (i = 0; i < edp_num; i++) {
+ if (link == edp_links[i])
+ break;
+ (*inst_out)++;
+ }
+ return true;
+}
bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
{
- return link_detect(link, reason);
+ return link->dc->link_srv->detect_link(link, reason);
}
bool dc_link_detect_connection_type(struct dc_link *link,
enum dc_connection_type *type)
{
- return link_detect_connection_type(link, type);
+ return link->dc->link_srv->detect_connection_type(link, type);
}
const struct dc_link_status *dc_link_get_status(const struct dc_link *link)
{
- return link_get_status(link);
+ return link->dc->link_srv->get_status(link);
}
-#ifdef CONFIG_DRM_AMD_DC_HDCP
/* return true if the connected receiver supports the hdcp version */
bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal)
{
- return link_is_hdcp14(link, signal);
+ return link->dc->link_srv->is_hdcp1x_supported(link, signal);
}
bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal)
{
- return link_is_hdcp22(link, signal);
+ return link->dc->link_srv->is_hdcp2x_supported(link, signal);
}
-#endif
void dc_link_clear_dprx_states(struct dc_link *link)
{
- link_clear_dprx_states(link);
+ link->dc->link_srv->clear_dprx_states(link);
}
bool dc_link_reset_cur_dp_mst_topology(struct dc_link *link)
{
- return link_reset_cur_dp_mst_topology(link);
+ return link->dc->link_srv->reset_cur_dp_mst_topology(link);
}
uint32_t dc_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_settings)
{
- return dp_link_bandwidth_kbps(link, link_settings);
-}
-
-uint32_t dc_bandwidth_in_kbps_from_timing(
- const struct dc_crtc_timing *timing)
-{
- return link_timing_bandwidth_kbps(timing);
+ return link->dc->link_srv->dp_link_bandwidth_kbps(link, link_settings);
}
void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map)
{
- link_get_cur_res_map(dc, map);
+ dc->link_srv->get_cur_res_map(dc, map);
}
void dc_restore_link_res_map(const struct dc *dc, uint32_t *map)
{
- link_restore_res_map(dc, map);
+ dc->link_srv->restore_res_map(dc, map);
}
bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx)
{
- return link_update_dsc_config(pipe_ctx);
+ struct dc_link *link = pipe_ctx->stream->link;
+
+ return link->dc->link_srv->update_dsc_config(pipe_ctx);
+}
+
+bool dc_is_oem_i2c_device_present(
+ struct dc *dc,
+ size_t slave_address)
+{
+ if (dc->res_pool->oem_device)
+ return dce_i2c_oem_device_present(
+ dc->res_pool,
+ dc->res_pool->oem_device,
+ slave_address);
+
+ return false;
+}
+
+bool dc_submit_i2c(
+ struct dc *dc,
+ uint32_t link_index,
+ struct i2c_command *cmd)
+{
+
+ struct dc_link *link = dc->links[link_index];
+ struct ddc_service *ddc = link->ddc;
+
+ return dce_i2c_submit_command(
+ dc->res_pool,
+ ddc->ddc_pin,
+ cmd);
+}
+
+bool dc_submit_i2c_oem(
+ struct dc *dc,
+ struct i2c_command *cmd)
+{
+ struct ddc_service *ddc = dc->res_pool->oem_device;
+
+ if (ddc)
+ return dce_i2c_submit_command(
+ dc->res_pool,
+ ddc->ddc_pin,
+ cmd);
+
+ return false;
+}
+
+void dc_link_dp_handle_automated_test(struct dc_link *link)
+{
+ link->dc->link_srv->dp_handle_automated_test(link);
+}
+
+bool dc_link_dp_set_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size)
+{
+ return link->dc->link_srv->dp_set_test_pattern(link, test_pattern,
+ test_pattern_color_space, p_link_settings,
+ p_custom_pattern, cust_pattern_size);
+}
+
+void dc_link_set_drive_settings(struct dc *dc,
+ struct link_training_settings *lt_settings,
+ struct dc_link *link)
+{
+ struct link_resource link_res;
+
+ dc->link_srv->get_cur_link_res(link, &link_res);
+ dc->link_srv->dp_set_drive_settings(link, &link_res, lt_settings);
+}
+
+void dc_link_set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link)
+{
+ dc->link_srv->dp_set_preferred_link_settings(dc, link_setting, link);
+}
+
+void dc_link_set_preferred_training_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link_training_overrides *lt_overrides,
+ struct dc_link *link,
+ bool skip_immediate_retrain)
+{
+ dc->link_srv->dp_set_preferred_training_settings(dc, link_setting,
+ lt_overrides, link, skip_immediate_retrain);
+}
+
+bool dc_dp_trace_is_initialized(struct dc_link *link)
+{
+ return link->dc->link_srv->dp_trace_is_initialized(link);
+}
+
+void dc_dp_trace_set_is_logged_flag(struct dc_link *link,
+ bool in_detection,
+ bool is_logged)
+{
+ link->dc->link_srv->dp_trace_set_is_logged_flag(link, in_detection, is_logged);
+}
+
+bool dc_dp_trace_is_logged(struct dc_link *link, bool in_detection)
+{
+ return link->dc->link_srv->dp_trace_is_logged(link, in_detection);
+}
+
+unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link,
+ bool in_detection)
+{
+ return link->dc->link_srv->dp_trace_get_lt_end_timestamp(link, in_detection);
+}
+
+const struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
+ bool in_detection)
+{
+ return link->dc->link_srv->dp_trace_get_lt_counts(link, in_detection);
+}
+
+unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link)
+{
+ return link->dc->link_srv->dp_trace_get_link_loss_count(link);
+}
+
+struct dc_sink *dc_link_add_remote_sink(
+ struct dc_link *link,
+ const uint8_t *edid,
+ int len,
+ struct dc_sink_init_data *init_data)
+{
+ return link->dc->link_srv->add_remote_sink(link, edid, len, init_data);
+}
+
+void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
+{
+ link->dc->link_srv->remove_remote_sink(link, sink);
+}
+
+int dc_link_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_return_code_type *operation_result)
+{
+ const struct dc *dc = ddc->link->dc;
+
+ return dc->link_srv->aux_transfer_raw(
+ ddc, payload, operation_result);
+}
+
+uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(
+ struct dc *dc, uint8_t bw)
+{
+ return dc->link_srv->bw_kbps_from_raw_frl_link_rate_data(bw);
+}
+
+bool dc_link_decide_edp_link_settings(struct dc_link *link,
+ struct dc_link_settings *link_setting, uint32_t req_bw)
+{
+ return link->dc->link_srv->edp_decide_link_settings(link, link_setting, req_bw);
+}
+
+
+bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link,
+ struct dc_link_settings *max_link_enc_cap)
+{
+ return link->dc->link_srv->dp_get_max_link_enc_cap(link, max_link_enc_cap);
+}
+
+enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(
+ const struct dc_link *link)
+{
+ return link->dc->link_srv->mst_decide_link_encoding_format(link);
+}
+
+const struct dc_link_settings *dc_link_get_link_cap(const struct dc_link *link)
+{
+ return link->dc->link_srv->dp_get_verified_link_cap(link);
+}
+
+bool dc_link_is_dp_sink_present(struct dc_link *link)
+{
+ return link->dc->link_srv->dp_is_sink_present(link);
+}
+
+bool dc_link_is_fec_supported(const struct dc_link *link)
+{
+ return link->dc->link_srv->dp_is_fec_supported(link);
+}
+
+void dc_link_overwrite_extended_receiver_cap(
+ struct dc_link *link)
+{
+ link->dc->link_srv->dp_overwrite_extended_receiver_cap(link);
+}
+
+bool dc_link_should_enable_fec(const struct dc_link *link)
+{
+ return link->dc->link_srv->dp_should_enable_fec(link);
+}
+
+int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
+ struct dc_link *link, int peak_bw)
+{
+ return link->dc->link_srv->dpia_handle_usb4_bandwidth_allocation_for_link(link, peak_bw);
+}
+
+void dc_link_handle_usb4_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result)
+{
+ link->dc->link_srv->dpia_handle_bw_alloc_response(link, bw, result);
+}
+
+bool dc_link_check_link_loss_status(
+ struct dc_link *link,
+ union hpd_irq_data *hpd_irq_dpcd_data)
+{
+ return link->dc->link_srv->dp_parse_link_loss_status(link, hpd_irq_dpcd_data);
+}
+
+bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link)
+{
+ return link->dc->link_srv->dp_should_allow_hpd_rx_irq(link);
+}
+
+void dc_link_dp_handle_link_loss(struct dc_link *link)
+{
+ link->dc->link_srv->dp_handle_link_loss(link);
+}
+
+enum dc_status dc_link_dp_read_hpd_rx_irq_data(
+ struct dc_link *link,
+ union hpd_irq_data *irq_data)
+{
+ return link->dc->link_srv->dp_read_hpd_rx_irq_data(link, irq_data);
+}
+
+bool dc_link_handle_hpd_rx_irq(struct dc_link *link,
+ union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
+ bool defer_handling, bool *has_left_work)
+{
+ return link->dc->link_srv->dp_handle_hpd_rx_irq(link, out_hpd_irq_dpcd_data,
+ out_link_loss, defer_handling, has_left_work);
+}
+
+void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on)
+{
+ link->dc->link_srv->dpcd_write_rx_power_ctrl(link, on);
+}
+
+enum lttpr_mode dc_link_decide_lttpr_mode(struct dc_link *link,
+ struct dc_link_settings *link_setting)
+{
+ return link->dc->link_srv->dp_decide_lttpr_mode(link, link_setting);
+}
+
+void dc_link_edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd)
+{
+ link->dc->link_srv->edp_panel_backlight_power_on(link, wait_for_hpd);
+}
+
+int dc_link_get_backlight_level(const struct dc_link *link)
+{
+ return link->dc->link_srv->edp_get_backlight_level(link);
+}
+
+bool dc_link_get_backlight_level_nits(struct dc_link *link,
+ uint32_t *backlight_millinits_avg,
+ uint32_t *backlight_millinits_peak)
+{
+ return link->dc->link_srv->edp_get_backlight_level_nits(link,
+ backlight_millinits_avg,
+ backlight_millinits_peak);
+}
+
+bool dc_link_set_backlight_level(const struct dc_link *link,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp)
+{
+ return link->dc->link_srv->edp_set_backlight_level(link,
+ backlight_pwm_u16_16, frame_ramp);
+}
+
+bool dc_link_set_backlight_level_nits(struct dc_link *link,
+ bool isHDR,
+ uint32_t backlight_millinits,
+ uint32_t transition_time_in_ms)
+{
+ return link->dc->link_srv->edp_set_backlight_level_nits(link, isHDR,
+ backlight_millinits, transition_time_in_ms);
+}
+
+int dc_link_get_target_backlight_pwm(const struct dc_link *link)
+{
+ return link->dc->link_srv->edp_get_target_backlight_pwm(link);
+}
+
+bool dc_link_get_psr_state(const struct dc_link *link, enum dc_psr_state *state)
+{
+ return link->dc->link_srv->edp_get_psr_state(link, state);
+}
+
+bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active,
+ bool wait, bool force_static, const unsigned int *power_opts)
+{
+ return link->dc->link_srv->edp_set_psr_allow_active(link, allow_active, wait,
+ force_static, power_opts);
+}
+
+bool dc_link_setup_psr(struct dc_link *link,
+ const struct dc_stream_state *stream, struct psr_config *psr_config,
+ struct psr_context *psr_context)
+{
+ return link->dc->link_srv->edp_setup_psr(link, stream, psr_config, psr_context);
+}
+
+bool dc_link_wait_for_t12(struct dc_link *link)
+{
+ return link->dc->link_srv->edp_wait_for_t12(link);
+}
+
+bool dc_link_get_hpd_state(struct dc_link *link)
+{
+ return link->dc->link_srv->get_hpd_state(link);
+}
+
+void dc_link_enable_hpd(const struct dc_link *link)
+{
+ link->dc->link_srv->enable_hpd(link);
+}
+
+void dc_link_disable_hpd(const struct dc_link *link)
+{
+ link->dc->link_srv->disable_hpd(link);
+}
+
+void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
+{
+ link->dc->link_srv->enable_hpd_filter(link, enable);
}
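/*
 * Illustrative sketch, not part of this patch: the forwarding pattern used
 * throughout dc_link_exports.c. Each public dc_link_* entry point stays a thin
 * wrapper into the link service vtable so link internals can live behind
 * dc->link_srv. "dc_link_example_query" and "example_query" are hypothetical
 * names used only for this example.
 */
bool dc_link_example_query(struct dc_link *link)
{
	return link->dc->link_srv->example_query(link);
}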
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index d9f2ef242b0f..85d54bfb595c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -232,7 +232,7 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
init_data->num_virtual_links, dc);
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
case DCN_VERSION_1_0:
case DCN_VERSION_1_01:
res_pool = dcn10_create_resource_pool(init_data, dc);
@@ -276,7 +276,7 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
case DCN_VERSION_3_21:
res_pool = dcn321_create_resource_pool(init_data, dc);
break;
-#endif
+#endif /* CONFIG_DRM_AMD_DC_FP */
default:
break;
}
@@ -2213,7 +2213,7 @@ enum dc_status dc_remove_stream_from_ctx(
del_pipe->stream_res.stream_enc,
false);
- if (link_is_dp_128b_132b_signal(del_pipe)) {
+ if (dc->link_srv->dp_is_128b_132b_signal(del_pipe)) {
update_hpo_dp_stream_engine_usage(
&new_ctx->res_ctx, dc->res_pool,
del_pipe->stream_res.hpo_dp_stream_enc,
@@ -2513,9 +2513,10 @@ enum dc_status resource_map_pool_resources(
* and link settings
*/
if (dc_is_dp_signal(stream->signal)) {
- if (!link_decide_link_settings(stream, &pipe_ctx->link_config.dp_link_settings))
+ if (!dc->link_srv->dp_decide_link_settings(stream, &pipe_ctx->link_config.dp_link_settings))
return DC_FAIL_DP_LINK_BANDWIDTH;
- if (link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
+ if (dc->link_srv->dp_get_encoding_format(
+ &pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
pipe_ctx->stream_res.hpo_dp_stream_enc =
find_first_free_match_hpo_dp_stream_enc_for_link(
&context->res_ctx, pool, stream);
@@ -3685,7 +3686,7 @@ enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
/* TODO: validate audio ASIC caps, encoder */
if (res == DC_OK)
- res = link_validate_mode_timing(stream,
+ res = dc->link_srv->validate_mode_timing(stream,
link,
&stream->timing);
@@ -3812,7 +3813,7 @@ bool get_temp_dp_link_res(struct dc_link *link,
memset(link_res, 0, sizeof(*link_res));
- if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
+ if (dc->link_srv->dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
link_res->hpo_dp_link_enc = get_temp_hpo_dp_link_enc(res_ctx,
dc->res_pool, link);
if (!link_res->hpo_dp_link_enc)
@@ -4027,14 +4028,14 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
else
sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
if (sec_pipe->stream->timing.flags.DSC == 1) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
dcn20_acquire_dsc(dc, &state->res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
#endif
ASSERT(sec_pipe->stream_res.dsc);
if (sec_pipe->stream_res.dsc == NULL)
return false;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
dcn20_build_mapped_resource(dc, state, sec_pipe->stream);
#endif
}
@@ -4046,7 +4047,7 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
struct dc_state *context,
struct pipe_ctx *pipe_ctx)
{
- if (link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
+ if (dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
if (pipe_ctx->stream_res.hpo_dp_stream_enc == NULL) {
pipe_ctx->stream_res.hpo_dp_stream_enc =
find_first_free_match_hpo_dp_stream_enc_for_link(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
index cde8ed2560b3..eda2152dcd1f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
@@ -47,9 +47,7 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
*/
memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));
dc->vm_pa_config.valid = true;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_z10_save_init(dc);
-#endif
}
return num_vmids;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 1fde43378689..2818483964dd 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -29,9 +29,7 @@
#include "dc_types.h"
#include "grph_object_defs.h"
#include "logger_types.h"
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
-#include "hdcp_types.h"
-#endif
+#include "hdcp_msg_types.h"
#include "gpio_types.h"
#include "link_service_types.h"
#include "grph_object_ctrl_defs.h"
@@ -47,7 +45,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.223"
+#define DC_VER "3.2.227"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -84,8 +82,6 @@ enum det_size {
struct dc_plane_cap {
enum dc_plane_type type;
- uint32_t blends_with_above : 1;
- uint32_t blends_with_below : 1;
uint32_t per_pixel_alpha : 1;
struct {
uint32_t argb8888 : 1;
@@ -716,6 +712,7 @@ struct dc_bounding_box_overrides {
struct dc_state;
struct resource_pool;
struct dce_hwseq;
+struct link_service;
/**
* struct dc_debug_options - DC debug struct
@@ -795,6 +792,7 @@ struct dc_debug_options {
unsigned int force_odm_combine; //bit vector based on otg inst
unsigned int seamless_boot_odm_combine;
unsigned int force_odm_combine_4to1; //bit vector based on otg inst
+ int minimum_z8_residency_time;
bool disable_z9_mpc;
unsigned int force_fclk_khz;
bool enable_tri_buf;
@@ -874,6 +872,7 @@ struct dc_debug_options {
bool disable_unbounded_requesting;
bool dig_fifo_off_in_blank;
bool temp_mst_deallocation_sequence;
+ bool override_dispclk_programming;
};
struct gpu_info_soc_bounding_box_v1_0;
@@ -890,6 +889,7 @@ struct dc {
uint8_t link_count;
struct dc_link *links[MAX_PIPES * 2];
+ struct link_service *link_srv;
struct dc_state *current_state;
struct resource_pool *res_pool;
@@ -991,11 +991,7 @@ struct dc_init_data {
};
struct dc_callback_init {
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct cp_psp cp_psp;
-#else
- uint8_t reserved;
-#endif
};
struct dc *dc_create(const struct dc_init_data *init_params);
@@ -1362,10 +1358,6 @@ enum dc_status dc_commit_streams(struct dc *dc,
struct dc_stream_state *streams[],
uint8_t stream_count);
-/* TODO: When the transition to the new commit sequence is done, remove this
- * function in favor of dc_commit_streams. */
-bool dc_commit_state(struct dc *dc, struct dc_state *context);
-
struct dc_state *dc_create_state(struct dc *dc);
struct dc_state *dc_copy_state(struct dc_state *src_ctx);
void dc_retain_state(struct dc_state *context);
@@ -1378,9 +1370,163 @@ struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc,
uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane);
+/* The function returns minimum bandwidth required to drive a given timing
+ * return - minimum required timing bandwidth in kbps.
+ */
+uint32_t dc_bandwidth_in_kbps_from_timing(const struct dc_crtc_timing *timing);
+
/* Link Interfaces */
-/* TODO: remove this after resolving external dependencies */
-#include "dc_link.h"
+/*
+ * A link contains one or more sinks and their connected status.
+ * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
+ */
+struct dc_link {
+ struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
+ unsigned int sink_count;
+ struct dc_sink *local_sink;
+ unsigned int link_index;
+ enum dc_connection_type type;
+ enum signal_type connector_signal;
+ enum dc_irq_source irq_source_hpd;
+ enum dc_irq_source irq_source_hpd_rx; /* aka DP Short Pulse */
+
+ bool is_hpd_filter_disabled;
+ bool dp_ss_off;
+
+ /**
+ * @link_state_valid:
+ *
+ * If there is no link and local sink, this variable should be set to
+ * false. Otherwise, it should be set to true; usually, the function
+ * core_link_enable_stream sets this field to true.
+ */
+ bool link_state_valid;
+ bool aux_access_disabled;
+ bool sync_lt_in_progress;
+ bool skip_stream_reenable;
+ bool is_internal_display;
+ /** @todo Rename. Flag an endpoint as having a programmable mapping to a DIG encoder. */
+ bool is_dig_mapping_flexible;
+ bool hpd_status; /* HPD status of link without physical HPD pin. */
+ bool is_hpd_pending; /* Indicates a new received hpd */
+ bool is_automated; /* Indicates automated testing */
+
+ bool edp_sink_present;
+
+ struct dp_trace dp_trace;
+
+ /* caps is the same as reported_link_cap. link_training uses
+ * reported_link_cap. Will clean up. TODO
+ */
+ struct dc_link_settings reported_link_cap;
+ struct dc_link_settings verified_link_cap;
+ struct dc_link_settings cur_link_settings;
+ struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX];
+ struct dc_link_settings preferred_link_setting;
+ /* preferred_training_settings are override values that
+ * come from DM. DM is responsible for the memory
+ * management of the override pointers.
+ */
+ struct dc_link_training_overrides preferred_training_settings;
+ struct dp_audio_test_data audio_test_data;
+
+ uint8_t ddc_hw_inst;
+
+ uint8_t hpd_src;
+
+ uint8_t link_enc_hw_inst;
+ /* DIG link encoder ID. Used as index in link encoder resource pool.
+ * For links with fixed mapping to DIG, this is not changed after dc_link
+ * object creation.
+ */
+ enum engine_id eng_id;
+
+ bool test_pattern_enabled;
+ union compliance_test_state compliance_test_state;
+
+ void *priv;
+
+ struct ddc_service *ddc;
+
+ bool aux_mode;
+
+ /* Private to DC core */
+
+ const struct dc *dc;
+
+ struct dc_context *ctx;
+
+ struct panel_cntl *panel_cntl;
+ struct link_encoder *link_enc;
+ struct graphics_object_id link_id;
+ /* Endpoint type distinguishes display endpoints which do not have entries
+ * in the BIOS connector table from those that do. Helps when tracking link
+ * encoder to display endpoint assignments.
+ */
+ enum display_endpoint_type ep_type;
+ union ddi_channel_mapping ddi_channel_mapping;
+ struct connector_device_tag_info device_tag;
+ struct dpcd_caps dpcd_caps;
+ uint32_t dongle_max_pix_clk;
+ unsigned short chip_caps;
+ unsigned int dpcd_sink_count;
+ struct hdcp_caps hdcp_caps;
+ enum edp_revision edp_revision;
+ union dpcd_sink_ext_caps dpcd_sink_ext_caps;
+
+ struct psr_settings psr_settings;
+
+ /* Drive settings read from integrated info table */
+ struct dc_lane_settings bios_forced_drive_settings;
+
+ /* Vendor specific LTTPR workaround variables */
+ uint8_t vendor_specific_lttpr_link_rate_wa;
+ bool apply_vendor_specific_lttpr_link_rate_wa;
+
+ /* MST record stream using this link */
+ struct link_flags {
+ bool dp_keep_receiver_powered;
+ bool dp_skip_DID2;
+ bool dp_skip_reset_segment;
+ bool dp_skip_fs_144hz;
+ bool dp_mot_reset_segment;
+ /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
+ bool dpia_mst_dsc_always_on;
+ /* Forced DPIA into TBT3 compatibility mode. */
+ bool dpia_forced_tbt3_mode;
+ bool dongle_mode_timing_override;
+ } wa_flags;
+ struct link_mst_stream_allocation_table mst_stream_alloc_table;
+
+ struct dc_link_status link_status;
+ struct dprx_states dprx_states;
+
+ struct gpio *hpd_gpio;
+ enum dc_link_fec_state fec_state;
+ bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly
+
+ struct dc_panel_config panel_config;
+ struct phy_state phy_state;
+ // BW ALLOCATION USB4 ONLY
+ struct dc_dpia_bw_alloc dpia_bw_alloc_config;
+};
+
+/* Return an enumerated dc_link.
+ * dc_link order is constant and determined at
+ * boot time. They cannot be created or destroyed.
+ * Use dc_get_caps() to get number of links.
+ */
+struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index);
+
+/* Return instance id of the edp link. Inst 0 is primary edp link. */
+bool dc_get_edp_link_panel_inst(const struct dc *dc,
+ const struct dc_link *link,
+ unsigned int *inst_out);
+
+/* Return an array of link pointers to edp links. */
+void dc_get_edp_links(const struct dc *dc,
+ struct dc_link **edp_links,
+ int *edp_num);
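
/*
 * Illustrative sketch, not part of this patch: collecting the eDP links through
 * the accessor above. MAX_NUM_EDP bounds the array, matching the definition of
 * dc_get_edp_links() in dc_link_exports.c; the helper name is hypothetical.
 */
static struct dc_link *example_first_edp_link(const struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	int edp_num = 0;

	dc_get_edp_links(dc, edp_links, &edp_num);

	return edp_num ? edp_links[0] : NULL;
}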
/* The function initiates detection handshake over the given link. It first
* determines if there are display connections over the link. If so it initiates
@@ -1404,6 +1550,38 @@ uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane);
*/
bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason);
+struct dc_sink_init_data;
+
+/* When link connection type is dc_connection_mst_branch, remote sink can be
+ * added to the link. The interface creates a remote sink and associates it with
+ * current link. The sink will be retained by link until remove remote sink is
+ * called.
+ *
+ * @dc_link - link the remote sink will be added to.
+ * @edid - byte array of EDID raw data.
+ * @len - size of the edid in bytes
+ * @init_data -
+ */
+struct dc_sink *dc_link_add_remote_sink(
+ struct dc_link *dc_link,
+ const uint8_t *edid,
+ int len,
+ struct dc_sink_init_data *init_data);
+
+/* Remove remote sink from a link with dc_connection_mst_branch connection type.
+ * @link - link the sink should be removed from
+ * @sink - sink to be removed.
+ */
+void dc_link_remove_remote_sink(
+ struct dc_link *link,
+ struct dc_sink *sink);
+
+/* Enable HPD interrupt handler for a given link */
+void dc_link_enable_hpd(const struct dc_link *link);
+
+/* Disable HPD interrupt handler for a given link */
+void dc_link_disable_hpd(const struct dc_link *link);
+
/* determine if there is a sink connected to the link
*
* @type - dc_connection_single if connected, dc_connection_none otherwise.
@@ -1417,14 +1595,115 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason);
bool dc_link_detect_connection_type(struct dc_link *link,
enum dc_connection_type *type);
+/* query current hpd pin value
+ * return - true HPD is asserted (HPD high), false otherwise (HPD low)
+ *
+ */
+bool dc_link_get_hpd_state(struct dc_link *link);
+
/* Getter for cached link status from given link */
const struct dc_link_status *dc_link_get_status(const struct dc_link *link);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
+/* enable/disable hardware HPD filter.
+ *
+ * @link - The link the HPD pin is associated with.
+ * @enable = true - enable hardware HPD filter. HPD event will only be queued to irq
+ * handler once after no HPD change has been detected within dc default HPD
+ * filtering interval since last HPD event. i.e. if display keeps toggling hpd
+ * pulses within default HPD interval, no HPD event will be received until HPD
+ * toggles have stopped. Then HPD event will be queued to irq handler once after
+ * dc default HPD filtering interval since last HPD event.
+ *
+ * @enable = false - disable hardware HPD filter. HPD event will be queued
+ * immediately to irq handler after no HPD change has been detected within
+ * IRQ_HPD (aka HPD short pulse) interval (i.e. 2ms).
+ */
+void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
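/*
 * Illustrative sketch, not part of this patch: a caller that wants HPD events
 * delivered without debounce (for example around a forced re-detection) can drop
 * the filter and restore it afterwards. "example_force_redetect" is a hypothetical
 * helper; DETECT_REASON_HPD is assumed as the detection reason for this example.
 */
static void example_force_redetect(struct dc_link *link)
{
	dc_link_enable_hpd_filter(link, false);	/* deliver HPD immediately */
	dc_link_detect(link, DETECT_REASON_HPD);
	dc_link_enable_hpd_filter(link, true);	/* back to default filtering */
}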
+
+/* submit i2c read/write payloads through ddc channel
+ * @link_index - index to a link with ddc in i2c mode
+ * @cmd - i2c command structure
+ * return - true if success, false otherwise.
+ */
+bool dc_submit_i2c(
+ struct dc *dc,
+ uint32_t link_index,
+ struct i2c_command *cmd);
+
+/* submit i2c read/write payloads through oem channel
+ * @link_index - index to a link with ddc in i2c mode
+ * @cmd - i2c command structure
+ * return - true if success, false otherwise.
+ */
+bool dc_submit_i2c_oem(
+ struct dc *dc,
+ struct i2c_command *cmd);
+
+enum aux_return_code_type;
+/* Attempt to transfer the given aux payload. This function does not perform
+ * retries or handle error states. The reply is returned in the payload->reply
+ * and the result through operation_result. Returns the number of bytes
+ * transferred, or -1 on failure.
+ */
+int dc_link_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_return_code_type *operation_result);
+
+bool dc_is_oem_i2c_device_present(
+ struct dc *dc,
+ size_t slave_address
+);
+
/* return true if the connected receiver supports the hdcp version */
bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal);
bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal);
-#endif
+
+/* Notify DC about DP RX Interrupt (aka DP IRQ_HPD).
+ *
+ * TODO - When defer_handling is true the function will have a different purpose.
+ * It no longer does complete hpd rx irq handling. We should create a separate
+ * interface specifically for this case.
+ *
+ * Return:
+ * true - Downstream port status changed. DM should call DC to do the
+ * detection.
+ * false - no change in Downstream port status. No further action required
+ * from DM.
+ */
+bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
+ union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss,
+ bool defer_handling, bool *has_left_work);
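/*
 * Illustrative sketch, not part of this patch: a DM-side short-pulse handler
 * acting on the return value documented above. "example_schedule_detection" is
 * a hypothetical DM helper, not a real function.
 */
static void example_handle_short_pulse(struct dc_link *link)
{
	union hpd_irq_data hpd_irq_data;
	bool link_loss = false;
	bool has_left_work = false;

	if (dc_link_handle_hpd_rx_irq(link, &hpd_irq_data, &link_loss,
				      false, &has_left_work))
		example_schedule_detection(link); /* downstream status changed */

	if (link_loss)
		dc_link_dp_handle_link_loss(link);
}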
+/* handle the test automation sequence defined by the DP specs */
+void dc_link_dp_handle_automated_test(struct dc_link *link);
+
+/* handle DP Link loss sequence and try to recover RX link loss with best
+ * effort
+ */
+void dc_link_dp_handle_link_loss(struct dc_link *link);
+
+/* Determine if hpd rx irq should be handled or ignored
+ * return true - hpd rx irq should be handled.
+ * return false - it is safe to ignore hpd rx irq event
+ */
+bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link);
+
+/* Determine if link loss is indicated with a given hpd_irq_dpcd_data.
+ * @link - link the hpd irq data is associated with
+ * @hpd_irq_dpcd_data - input hpd irq data
+ * return - true if hpd irq data indicates link loss
+ */
+bool dc_link_check_link_loss_status(struct dc_link *link,
+ union hpd_irq_data *hpd_irq_dpcd_data);
+
+/* Read hpd rx irq data from a given link
+ * @link - link where the hpd irq data should be read from
+ * @irq_data - output hpd irq data
+ * return - DC_OK if hpd irq data is read successfully, otherwise hpd irq data
+ * read has failed.
+ */
+enum dc_status dc_link_dp_read_hpd_rx_irq_data(
+ struct dc_link *link,
+ union hpd_irq_data *irq_data);
/* The function clears recorded DP RX states in the link. DM should call this
* function when it is resuming from S3 power state to previously connected links.
@@ -1450,12 +1729,6 @@ uint32_t dc_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_setting);
-/* The function returns minimum bandwidth required to drive a given timing
- * return - minimum required timing bandwidth in kbps.
- */
-uint32_t dc_bandwidth_in_kbps_from_timing(
- const struct dc_crtc_timing *timing);
-
/* The function takes a snapshot of current link resource allocation state
* @dc: pointer to dc of the dm calling this
* @map: a dc link resource snapshot defined internally to dc.
@@ -1493,6 +1766,269 @@ void dc_restore_link_res_map(const struct dc *dc, uint32_t *map);
* interface i.e stream_update->dsc_config
*/
bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx);
+
+/* translate raw link rate data to bandwidth in kbps */
+uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(
+ struct dc *dc, uint8_t bw);
+
+/* determine the optimal bandwidth given link and required bw.
+ * @link - current detected link
+ * @req_bw - requested bandwidth in kbps
+ * @link_settings - returned most optimal link settings that can fit the
+ * requested bandwidth
+ * return - false if link can't support requested bandwidth, true if link
+ * settings is found.
+ */
+bool dc_link_decide_edp_link_settings(struct dc_link *link,
+ struct dc_link_settings *link_settings,
+ uint32_t req_bw);
+
+/* return the max dp link settings that can be driven by the link without considering
+ * connected RX device and its capability
+ */
+bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link,
+ struct dc_link_settings *max_link_enc_cap);
+
+/* determine which DP link channel coding format will be used when the link is
+ * driving MST mode. The decision will remain unchanged until next HPD event.
+ *
+ * @link - a link with DP RX connection
+ * return - if stream is committed to this link with MST signal type, type of
+ * channel coding format dc will choose.
+ */
+enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(
+ const struct dc_link *link);
+
+/* get max dp link settings the link can enable with all things considered
+ * (i.e. TX/RX/cable capabilities and dp override policies).
+ *
+ * @link - a link with DP RX connection
+ * return - max dp link settings the link can enable.
+ *
+ */
+const struct dc_link_settings *dc_link_get_link_cap(const struct dc_link *link);
+
+/* Check if an RX (e.g. DP sink, MST hub, passive or active dongle) is connected
+ * to a link with dp connector signal type.
+ * @link - a link with dp connector signal type
+ * return - true if connected, false otherwise
+ */
+bool dc_link_is_dp_sink_present(struct dc_link *link);
+
+/* Force DP lane settings update to main-link video signal and notify the change
+ * to DP RX via DPCD. This is a debug interface used for video signal integrity
+ * tuning purposes. The interface assumes the link has already been enabled with DP
+ * signal.
+ *
+ * @lt_settings - a container structure with desired hw_lane_settings
+ */
+void dc_link_set_drive_settings(struct dc *dc,
+ struct link_training_settings *lt_settings,
+ struct dc_link *link);
+
+/* Enable a test pattern in Link or PHY layer in an active link for compliance
+ * test or debugging purpose. The test pattern will remain until next un-plug.
+ *
+ * @link - active link with DP signal output enabled.
+ * @test_pattern - desired test pattern to output.
+ * NOTE: set to DP_TEST_PATTERN_VIDEO_MODE to disable previous test pattern.
+ * @test_pattern_color_space - for video test pattern choose a desired color
+ * space.
+ * @p_link_settings - For PHY pattern choose a desired link settings
+ * @p_custom_pattern - some test patterns require a custom input to
+ * customize pattern details. Otherwise keep it NULL.
+ * @cust_pattern_size - size of the custom pattern input.
+ *
+ */
+bool dc_link_dp_set_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size);
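/*
 * Illustrative sketch, not part of this patch: clearing a previously programmed
 * test pattern by requesting the video-mode pattern, per the note above. The
 * DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED argument is assumed to be an existing
 * enum value that is ignored for this case; the helper name is hypothetical.
 */
static bool example_clear_test_pattern(struct dc_link *link)
{
	return dc_link_dp_set_test_pattern(link,
					   DP_TEST_PATTERN_VIDEO_MODE,
					   DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED,
					   NULL, NULL, 0);
}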
+
+/* Force a specific link to always use a specific set of DP link settings until
+ * reboot. If the link has already been enabled, the interface will also
+ * switch to the desired link settings immediately. This is a debug interface
+ * for generic dp issue troubleshooting.
+ */
+void dc_link_set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link);
+
+/* Force DP link to customize a specific link training behavior by overriding
+ * the standard protocol defined by the DP specs. This is a debug interface to
+ * troubleshoot display specific link training issues or apply display specific
+ * workarounds in link training.
+ *
+ * @link_settings - if not NULL, force preferred link settings to the link.
+ * @lt_override - a set of override pointers. If any pointer is non-NULL, dc
+ * will apply this particular override in future link training. If NULL is
+ * passed in, dc resets previous overrides.
+ * NOTE: DM must keep the memory from override pointers until DM resets preferred
+ * training settings.
+ */
+void dc_link_set_preferred_training_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link_training_overrides *lt_overrides,
+ struct dc_link *link,
+ bool skip_immediate_retrain);
+
+/* return - true if FEC is supported with connected DP RX, false otherwise */
+bool dc_link_is_fec_supported(const struct dc_link *link);
+
+/* query FEC enablement policy to determine if FEC will be enabled by dc during
+ * link enablement.
+ * return - true if FEC should be enabled, false otherwise.
+ */
+bool dc_link_should_enable_fec(const struct dc_link *link);
+
+/* determine the lttpr mode the current link should be enabled with for a given
+ * set of link settings.
+ */
+enum lttpr_mode dc_link_decide_lttpr_mode(struct dc_link *link,
+ struct dc_link_settings *link_setting);
+
+/* Force DP RX to update its power state.
+ * NOTE: this interface doesn't update dp main-link. Calling this function will
+ * cause the DP TX main-link and DP RX power states to go out of sync. DM has to
+ * restore the RX power state after finishing any DM specific execution that
+ * requires DP RX to be in a specific power state.
+ * @on - true to set DP RX in D0 power state, false to set DP RX in D3 power
+ * state.
+ */
+void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on);
+
+/* Force link to read base dp receiver caps from dpcd 000h - 00Fh and overwrite
+ * current value read from extended receiver cap from 02200h - 0220Fh.
+ * Some DP RX have problems providing accurate DP receiver caps via the extended
+ * field; this interface is a workaround to revert the link back to the base caps.
+ */
+void dc_link_overwrite_extended_receiver_cap(
+ struct dc_link *link);
+
+void dc_link_edp_panel_backlight_power_on(struct dc_link *link,
+ bool wait_for_hpd);
+
+/* Set backlight level of an embedded panel (eDP, LVDS).
+ * backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer
+ * and 16 bit fractional, where 1.0 is max backlight value.
+ */
+bool dc_link_set_backlight_level(const struct dc_link *dc_link,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp);
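/*
 * Illustrative sketch, not part of this patch: converting a 0-100 percent
 * brightness request into the U16.16 fixed-point value described above, where
 * 0x10000 represents 1.0 (full brightness). A frame_ramp of 0 is assumed here
 * to mean no ramping; the helper name is hypothetical.
 */
static bool example_set_backlight_percent(struct dc_link *link, unsigned int percent)
{
	uint32_t backlight_pwm_u16_16 = (percent * 0x10000) / 100;

	return dc_link_set_backlight_level(link, backlight_pwm_u16_16, 0);
}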
+
+/* Set/get nits-based backlight level of an embedded panel (eDP, LVDS). */
+bool dc_link_set_backlight_level_nits(struct dc_link *link,
+ bool isHDR,
+ uint32_t backlight_millinits,
+ uint32_t transition_time_in_ms);
+
+bool dc_link_get_backlight_level_nits(struct dc_link *link,
+ uint32_t *backlight_millinits,
+ uint32_t *backlight_millinits_peak);
+
+int dc_link_get_backlight_level(const struct dc_link *dc_link);
+
+int dc_link_get_target_backlight_pwm(const struct dc_link *link);
+
+bool dc_link_set_psr_allow_active(struct dc_link *dc_link, const bool *enable,
+ bool wait, bool force_static, const unsigned int *power_opts);
+
+bool dc_link_get_psr_state(const struct dc_link *dc_link, enum dc_psr_state *state);
+
+bool dc_link_setup_psr(struct dc_link *dc_link,
+ const struct dc_stream_state *stream, struct psr_config *psr_config,
+ struct psr_context *psr_context);
+
+/* On eDP links this function call will stall until T12 has elapsed.
+ * If the panel is not in power off state, this function will return
+ * immediately.
+ */
+bool dc_link_wait_for_t12(struct dc_link *link);
+
+/* Determine if dp trace has been initialized to reflect up to date results.
+ * return - true if trace is initialized and has valid data, false if dp trace
+ * doesn't have a valid result.
+ */
+bool dc_dp_trace_is_initialized(struct dc_link *link);
+
+/* Query a dp trace flag to indicate if the current dp trace data has been
+ * logged before
+ */
+bool dc_dp_trace_is_logged(struct dc_link *link,
+ bool in_detection);
+
+/* Set dp trace flag to indicate whether DM has already logged the current dp
+ * trace data. DM can set is_logged to true upon logging and check
+ * dc_dp_trace_is_logged before logging to avoid logging the same result twice.
+ */
+void dc_dp_trace_set_is_logged_flag(struct dc_link *link,
+ bool in_detection,
+ bool is_logged);
+
+/* Obtain driver time stamp for last dp link training end. The time stamp is
+ * formatted based on dm_get_timestamp DM function.
+ * @in_detection - true to get link training end time stamp of last link
+ * training in detection sequence. false to get link training end time stamp
+ * of last link training in commit (dpms) sequence
+ */
+unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link,
+ bool in_detection);
+
+/* Get how many link training attempts dc has done with latest sequence.
+ * @in_detection - true to get link training count of last link
+ * training in detection sequence. false to get link training count of last link
+ * training in commit (dpms) sequence
+ */
+const struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
+ bool in_detection);
+
+/* Get how many link losses have happened since the last link training attempt */
+unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link);
+
+/*
+ * USB4 DPIA BW ALLOCATION PUBLIC FUNCTIONS
+ */
+/*
+ * Send a request from DP-Tx requesting to allocate BW remotely after
+ * allocating it locally. This will get processed by CM and a CB function
+ * will be called.
+ *
+ * @link: pointer to the dc_link struct instance
+ * @req_bw: The requested bw in Kbyte to be allocated
+ *
+ * return: none
+ */
+void dc_link_set_usb4_req_bw_req(struct dc_link *link, int req_bw);
+
+/*
+ * Handler function called when the status of the request above is complete.
+ * We will find out the result of allocating on CM and update structs.
+ *
+ * @link: pointer to the dc_link struct instance
+ * @bw: Allocated or Estimated BW depending on the result
+ * @result: Response type
+ *
+ * return: none
+ */
+void dc_link_handle_usb4_bw_alloc_response(struct dc_link *link,
+ uint8_t bw, uint8_t result);
+
+/*
+ * Handle the USB4 BW Allocation related functionality here:
+ * Plug => Try to allocate max bw from timing parameters supported by the sink
+ * Unplug => de-allocate bw
+ *
+ * @link: pointer to the dc_link struct instance
+ * @peak_bw: Peak bw used by the link/sink
+ *
+ * return: allocated bw else return 0
+ */
+int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
+ struct dc_link *link, int peak_bw);
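/*
 * Illustrative flow, not part of this patch: DM requests bandwidth and later
 * forwards the connection manager's reply back to DC. The notification hookup
 * ("example_on_cm_reply") and both helper names are hypothetical placeholders
 * for DM-side plumbing.
 */
static void example_request_dpia_bw(struct dc_link *link, int req_bw_kbyte)
{
	dc_link_set_usb4_req_bw_req(link, req_bw_kbyte);
}

static void example_on_cm_reply(struct dc_link *link, uint8_t bw, uint8_t result)
{
	dc_link_handle_usb4_bw_alloc_response(link, bw, result);
}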
+
/* Sink Interfaces - A sink corresponds to a display output device */
struct dc_container_id {
@@ -1511,7 +2047,7 @@ struct dc_sink_dsc_caps {
// 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology),
// 'false' if they are sink's DSC caps
bool is_virtual_dpcd_dsc;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
// 'true' if MST topology supports DSC passthrough for sink
// 'false' if MST topology does not support DSC passthrough
bool is_dsc_passthrough_supported;
@@ -1603,7 +2139,6 @@ void dc_resume(struct dc *dc);
void dc_power_down_on_boot(struct dc *dc);
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
/*
* HDCP Interfaces
*/
@@ -1611,7 +2146,6 @@ enum hdcp_message_status dc_process_hdcp_msg(
enum signal_type signal,
struct dc_link *link,
struct hdcp_protection_message *message_info);
-#endif
bool dc_is_dmcu_initialized(struct dc *dc);
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index c2092775ca88..b5c6501c28fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -421,7 +421,6 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
}
}
-#ifdef CONFIG_DRM_AMD_DC_DCN
/**
* populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command
*
@@ -638,7 +637,7 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
pipe_data->pipe_config.subvp_data.main_vblank_end =
main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable;
pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable;
- pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->pipe_idx;
+ pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->stream_res.tg->inst;
pipe_data->pipe_config.subvp_data.is_drr = subvp_pipe->stream->ignore_msa_timing_param;
/* Calculate the scaling factor from the src and dst height.
@@ -680,11 +679,11 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];
if (phantom_pipe->stream == subvp_pipe->stream->mall_stream_config.paired_stream) {
- pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->pipe_idx;
+ pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
if (phantom_pipe->bottom_pipe) {
- pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->pipe_idx;
+ pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
} else if (phantom_pipe->next_odm_pipe) {
- pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->pipe_idx;
+ pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst;
} else {
pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0;
}
@@ -750,7 +749,8 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
!pipe->top_pipe && !pipe->prev_odm_pipe &&
pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
- } else if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ } else if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_NONE &&
+ !pipe->top_pipe && !pipe->prev_odm_pipe) {
// Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where
// we run through DML without calculating "natural" P-state support
populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
@@ -775,7 +775,6 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
}
-#endif
bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 809a1851f196..af53278662ec 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -921,12 +921,6 @@ struct dpcd_usb4_dp_tunneling_info {
#ifndef DP_DFP_CAPABILITY_EXTENSION_SUPPORT
#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0A3
#endif
-#ifndef DP_LINK_SQUARE_PATTERN
-#define DP_LINK_SQUARE_PATTERN 0x10F
-#endif
-#ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX
-#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX 0x110
-#endif
#ifndef DP_DSC_CONFIGURATION
#define DP_DSC_CONFIGURATION 0x161
#endif
@@ -939,12 +933,6 @@ struct dpcd_usb4_dp_tunneling_info {
#ifndef DP_128b_132b_TRAINING_AUX_RD_INTERVAL
#define DP_128b_132b_TRAINING_AUX_RD_INTERVAL 0x2216
#endif
-#ifndef DP_LINK_SQUARE_PATTERN
-#define DP_LINK_SQUARE_PATTERN 0x10F
-#endif
-#ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX
-#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX 0x2217
-#endif
#ifndef DP_TEST_264BIT_CUSTOM_PATTERN_7_0
#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0X2230
#endif
@@ -988,10 +976,6 @@ struct dpcd_usb4_dp_tunneling_info {
#define DP_INTRA_HOP_AUX_REPLY_INDICATION (1 << 3)
/* TODO - Use DRM header to replace above once available */
#endif // DP_INTRA_HOP_AUX_REPLY_INDICATION
-
-#ifndef DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE
-#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
-#endif
union dp_main_line_channel_coding_cap {
struct {
uint8_t DP_8b_10b_SUPPORTED :1;
@@ -1261,4 +1245,161 @@ union dpcd_sink_ext_caps {
} bits;
uint8_t raw;
};
+
+enum dc_link_fec_state {
+ dc_link_fec_not_ready,
+ dc_link_fec_ready,
+ dc_link_fec_enabled
+};
+
+union dpcd_psr_configuration {
+ struct {
+ unsigned char ENABLE : 1;
+ unsigned char TRANSMITTER_ACTIVE_IN_PSR : 1;
+ unsigned char CRC_VERIFICATION : 1;
+ unsigned char FRAME_CAPTURE_INDICATION : 1;
+ /* For eDP 1.4, PSR v2*/
+ unsigned char LINE_CAPTURE_INDICATION : 1;
+ /* For eDP 1.4, PSR v2*/
+ unsigned char IRQ_HPD_WITH_CRC_ERROR : 1;
+ unsigned char ENABLE_PSR2 : 1;
+ unsigned char EARLY_TRANSPORT_ENABLE : 1;
+ } bits;
+ unsigned char raw;
+};
+
+union dpcd_alpm_configuration {
+ struct {
+ unsigned char ENABLE : 1;
+ unsigned char IRQ_HPD_ENABLE : 1;
+ unsigned char RESERVED : 6;
+ } bits;
+ unsigned char raw;
+};
+
+union dpcd_sink_active_vtotal_control_mode {
+ struct {
+ unsigned char ENABLE : 1;
+ unsigned char RESERVED : 7;
+ } bits;
+ unsigned char raw;
+};
+
+union psr_error_status {
+ struct {
+ unsigned char LINK_CRC_ERROR :1;
+ unsigned char RFB_STORAGE_ERROR :1;
+ unsigned char VSC_SDP_ERROR :1;
+ unsigned char RESERVED :5;
+ } bits;
+ unsigned char raw;
+};
+
+union psr_sink_psr_status {
+ struct {
+ unsigned char SINK_SELF_REFRESH_STATUS :3;
+ unsigned char RESERVED :5;
+ } bits;
+ unsigned char raw;
+};
+
+struct edp_trace_power_timestamps {
+ uint64_t poweroff;
+ uint64_t poweron;
+};
+
+struct dp_trace_lt_counts {
+ unsigned int total;
+ unsigned int fail;
+};
+
+enum link_training_result {
+ LINK_TRAINING_SUCCESS,
+ LINK_TRAINING_CR_FAIL_LANE0,
+ LINK_TRAINING_CR_FAIL_LANE1,
+ LINK_TRAINING_CR_FAIL_LANE23,
+ /* CR DONE bit is cleared during EQ step */
+ LINK_TRAINING_EQ_FAIL_CR,
+ /* CR DONE bit is cleared but LANE0_CR_DONE is set during EQ step */
+ LINK_TRAINING_EQ_FAIL_CR_PARTIAL,
+ /* other failure during EQ step */
+ LINK_TRAINING_EQ_FAIL_EQ,
+ LINK_TRAINING_LQA_FAIL,
+ /* one of the CR,EQ or symbol lock is dropped */
+ LINK_TRAINING_LINK_LOSS,
+ /* Abort link training (because sink unplugged) */
+ LINK_TRAINING_ABORT,
+ DP_128b_132b_LT_FAILED,
+ DP_128b_132b_MAX_LOOP_COUNT_REACHED,
+ DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT,
+ DP_128b_132b_CDS_DONE_TIMEOUT,
+};
+
+struct dp_trace_lt {
+ struct dp_trace_lt_counts counts;
+ struct dp_trace_timestamps {
+ unsigned long long start;
+ unsigned long long end;
+ } timestamps;
+ enum link_training_result result;
+ bool is_logged;
+};
+
+struct dp_trace {
+ struct dp_trace_lt detect_lt_trace;
+ struct dp_trace_lt commit_lt_trace;
+ unsigned int link_loss_count;
+ bool is_initialized;
+ struct edp_trace_power_timestamps edp_trace_power_timestamps;
+};
+
+/* TODO - This is a temporary location for any new DPCD definitions.
+ * We should move these to drm_dp header.
+ */
+#ifndef DP_LINK_SQUARE_PATTERN
+#define DP_LINK_SQUARE_PATTERN 0x10F
+#endif
+#ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX
+#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX 0x2217
+#endif
+#ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX
+#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX 0x110
+#endif
+#ifndef DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE
+#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
+#endif
+#ifndef DP_TUNNELING_IRQ
+#define DP_TUNNELING_IRQ (1 << 5)
+#endif
+/** USB4 DPCD BW Allocation Registers Chapter 10.7 **/
+#ifndef DP_TUNNELING_CAPABILITIES
+#define DP_TUNNELING_CAPABILITIES 0xE000D /* 1.4a */
+#endif
+#ifndef USB4_DRIVER_ID
+#define USB4_DRIVER_ID 0xE000F /* 1.4a */
+#endif
+#ifndef USB4_DRIVER_BW_CAPABILITY
+#define USB4_DRIVER_BW_CAPABILITY 0xE0020 /* 1.4a */
+#endif
+#ifndef DP_IN_ADAPTER_TUNNEL_INFO
+#define DP_IN_ADAPTER_TUNNEL_INFO 0xE0021 /* 1.4a */
+#endif
+#ifndef DP_BW_GRANULALITY
+#define DP_BW_GRANULALITY 0xE0022 /* 1.4a */
+#endif
+#ifndef ESTIMATED_BW
+#define ESTIMATED_BW 0xE0023 /* 1.4a */
+#endif
+#ifndef ALLOCATED_BW
+#define ALLOCATED_BW 0xE0024 /* 1.4a */
+#endif
+#ifndef DP_TUNNELING_STATUS
+#define DP_TUNNELING_STATUS 0xE0025 /* 1.4a */
+#endif
+#ifndef DPTX_BW_ALLOCATION_MODE_CONTROL
+#define DPTX_BW_ALLOCATION_MODE_CONTROL 0xE0030 /* 1.4a */
+#endif
+#ifndef REQUESTED_BW
+#define REQUESTED_BW 0xE0031 /* 1.4a */
+#endif
#endif /* DC_DP_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 684713b2cff7..0e92a322c2ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -54,6 +54,12 @@ struct dc_dsc_policy {
bool enable_dsc_when_not_needed;
};
+struct dc_dsc_config_options {
+ uint32_t dsc_min_slice_height_override;
+ uint32_t max_target_bpp_limit_override_x16;
+ uint32_t slice_height_granularity;
+};
+
bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
const uint8_t *dpcd_dsc_basic_data,
const uint8_t *dpcd_dsc_ext_data,
@@ -71,8 +77,7 @@ bool dc_dsc_compute_bandwidth_range(
bool dc_dsc_compute_config(
const struct display_stream_compressor *dsc,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
- uint32_t dsc_min_slice_height_override,
- uint32_t max_target_bpp_limit_override,
+ const struct dc_dsc_config_options *options,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
struct dc_dsc_config *dsc_cfg);
@@ -100,4 +105,6 @@ void dc_dsc_policy_set_enable_dsc_when_not_needed(bool enable);
void dc_dsc_policy_set_disable_dsc_stream_overhead(bool disable);
+void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_options *options);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index cc3d6fb39364..100d62162b71 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -829,7 +829,7 @@ struct dc_dsc_config {
uint32_t version_minor; /* DSC minor version. Full version is formed as 1.version_minor. */
bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. */
int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
bool is_frl; /* indicate if DSC is applied based on HDMI FRL sink's capability */
#endif
bool is_dp; /* indicate if DSC is applied based on DP's capability */
@@ -1085,5 +1085,19 @@ struct tg_color {
uint16_t color_b_cb;
};
+enum symclk_state {
+ SYMCLK_OFF_TX_OFF,
+ SYMCLK_ON_TX_ON,
+ SYMCLK_ON_TX_OFF,
+};
+
+struct phy_state {
+ struct {
+ uint8_t otg : 1;
+ uint8_t reserved : 7;
+ } symclk_ref_cnts;
+ enum symclk_state symclk_state;
+};
+
#endif /* DC_HW_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
deleted file mode 100644
index cecd807f5ed8..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ /dev/null
@@ -1,577 +0,0 @@
-/*
- * Copyright 2012-14 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef DC_LINK_H_
-#define DC_LINK_H_
-
-#include "dc.h"
-#include "dc_types.h"
-#include "grph_object_defs.h"
-
-struct link_resource;
-enum aux_return_code_type;
-
-enum dc_link_fec_state {
- dc_link_fec_not_ready,
- dc_link_fec_ready,
- dc_link_fec_enabled
-};
-
-/* DP MST stream allocation (payload bandwidth number) */
-struct link_mst_stream_allocation {
- /* DIG front */
- const struct stream_encoder *stream_enc;
- /* HPO DP Stream Encoder */
- const struct hpo_dp_stream_encoder *hpo_dp_stream_enc;
- /* associate DRM payload table with DC stream encoder */
- uint8_t vcp_id;
- /* number of slots required for the DP stream in transport packet */
- uint8_t slot_count;
-};
-
-/* DP MST stream allocation table */
-struct link_mst_stream_allocation_table {
- /* number of DP video streams */
- int stream_count;
- /* array of stream allocations */
- struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
-};
-
-struct edp_trace_power_timestamps {
- uint64_t poweroff;
- uint64_t poweron;
-};
-
-struct dp_trace_lt_counts {
- unsigned int total;
- unsigned int fail;
-};
-
-struct dp_trace_lt {
- struct dp_trace_lt_counts counts;
- struct dp_trace_timestamps {
- unsigned long long start;
- unsigned long long end;
- } timestamps;
- enum link_training_result result;
- bool is_logged;
-};
-
-struct dp_trace {
- struct dp_trace_lt detect_lt_trace;
- struct dp_trace_lt commit_lt_trace;
- unsigned int link_loss_count;
- bool is_initialized;
- struct edp_trace_power_timestamps edp_trace_power_timestamps;
-};
-
-/* PSR feature flags */
-struct psr_settings {
- bool psr_feature_enabled; // PSR is supported by sink
- bool psr_allow_active; // PSR is currently active
- enum dc_psr_version psr_version; // Internal PSR version, determined based on DPCD
- bool psr_vtotal_control_support; // Vtotal control is supported by sink
- unsigned long long psr_dirty_rects_change_timestamp_ns; // for delay of enabling PSR-SU
-
- /* These parameters are calculated in Driver,
- * based on display timing and Sink capabilities.
- * If VBLANK region is too small and Sink takes a long time
- * to set up RFB, it may take an extra frame to enter PSR state.
- */
- bool psr_frame_capture_indication_req;
- unsigned int psr_sdp_transmit_line_num_deadline;
- uint8_t force_ffu_mode;
- unsigned int psr_power_opt;
-};
-
-/* To split out "global" and "per-panel" config settings.
- * Add a struct dc_panel_config under dc_link
- */
-struct dc_panel_config {
- /* extra panel power sequence parameters */
- struct pps {
- unsigned int extra_t3_ms;
- unsigned int extra_t7_ms;
- unsigned int extra_delay_backlight_off;
- unsigned int extra_post_t7_ms;
- unsigned int extra_pre_t11_ms;
- unsigned int extra_t12_ms;
- unsigned int extra_post_OUI_ms;
- } pps;
- /* PSR */
- struct psr {
- bool disable_psr;
- bool disallow_psrsu;
- bool rc_disable;
- bool rc_allow_static_screen;
- bool rc_allow_fullscreen_VPB;
- } psr;
- /* ABM */
- struct varib {
- unsigned int varibright_feature_enable;
- unsigned int def_varibright_level;
- unsigned int abm_config_setting;
- } varib;
- /* edp DSC */
- struct dsc {
- bool disable_dsc_edp;
- unsigned int force_dsc_edp_policy;
- } dsc;
- /* eDP ILR */
- struct ilr {
- bool optimize_edp_link_rate; /* eDP ILR */
- } ilr;
-};
-
-/*
- * USB4 DPIA BW ALLOCATION STRUCTS
- */
-struct dc_dpia_bw_alloc {
- int sink_verified_bw; // The Verified BW that sink can allocated and use that has been verified already
- int sink_allocated_bw; // The Actual Allocated BW that sink currently allocated
- int sink_max_bw; // The Max BW that sink can require/support
- int estimated_bw; // The estimated available BW for this DPIA
- int bw_granularity; // BW Granularity
- bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3: DP-Tx & Dpia & CM
- bool response_ready; // Response ready from the CM side
-};
-
-#define MAX_SINKS_PER_LINK 4
-
-/*
- * A link contains one or more sinks and their connected status.
- * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
- */
-struct dc_link {
- struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
- unsigned int sink_count;
- struct dc_sink *local_sink;
- unsigned int link_index;
- enum dc_connection_type type;
- enum signal_type connector_signal;
- enum dc_irq_source irq_source_hpd;
- enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
- bool is_hpd_filter_disabled;
- bool dp_ss_off;
-
- /**
- * @link_state_valid:
- *
- * If there is no link and local sink, this variable should be set to
- * false. Otherwise, it should be set to true; usually, the function
- * core_link_enable_stream sets this field to true.
- */
- bool link_state_valid;
- bool aux_access_disabled;
- bool sync_lt_in_progress;
- bool is_internal_display;
-
- /* TODO: Rename. Flag an endpoint as having a programmable mapping to a
- * DIG encoder. */
- bool is_dig_mapping_flexible;
- bool hpd_status; /* HPD status of link without physical HPD pin. */
- bool is_hpd_pending; /* Indicates a new received hpd */
- bool is_automated; /* Indicates automated testing */
-
- bool edp_sink_present;
-
- struct dp_trace dp_trace;
-
- /* caps is the same as reported_link_cap. link_training uses
- * reported_link_cap. Will clean up. TODO
- */
- struct dc_link_settings reported_link_cap;
- struct dc_link_settings verified_link_cap;
- struct dc_link_settings cur_link_settings;
- struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX];
- struct dc_link_settings preferred_link_setting;
- /* preferred_training_settings are override values that
- * come from DM. DM is responsible for the memory
- * management of the override pointers.
- */
- struct dc_link_training_overrides preferred_training_settings;
- struct dp_audio_test_data audio_test_data;
-
- uint8_t ddc_hw_inst;
-
- uint8_t hpd_src;
-
- uint8_t link_enc_hw_inst;
- /* DIG link encoder ID. Used as index in link encoder resource pool.
- * For links with fixed mapping to DIG, this is not changed after dc_link
- * object creation.
- */
- enum engine_id eng_id;
-
- bool test_pattern_enabled;
- union compliance_test_state compliance_test_state;
-
- void *priv;
-
- struct ddc_service *ddc;
-
- bool aux_mode;
-
- /* Private to DC core */
-
- const struct dc *dc;
-
- struct dc_context *ctx;
-
- struct panel_cntl *panel_cntl;
- struct link_encoder *link_enc;
- struct graphics_object_id link_id;
- /* Endpoint type distinguishes display endpoints which do not have entries
- * in the BIOS connector table from those that do. Helps when tracking link
- * encoder to display endpoint assignments.
- */
- enum display_endpoint_type ep_type;
- union ddi_channel_mapping ddi_channel_mapping;
- struct connector_device_tag_info device_tag;
- struct dpcd_caps dpcd_caps;
- uint32_t dongle_max_pix_clk;
- unsigned short chip_caps;
- unsigned int dpcd_sink_count;
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
- struct hdcp_caps hdcp_caps;
-#endif
- enum edp_revision edp_revision;
- union dpcd_sink_ext_caps dpcd_sink_ext_caps;
-
- struct psr_settings psr_settings;
-
- /* Drive settings read from integrated info table */
- struct dc_lane_settings bios_forced_drive_settings;
-
- /* Vendor specific LTTPR workaround variables */
- uint8_t vendor_specific_lttpr_link_rate_wa;
- bool apply_vendor_specific_lttpr_link_rate_wa;
-
- /* MST record stream using this link */
- struct link_flags {
- bool dp_keep_receiver_powered;
- bool dp_skip_DID2;
- bool dp_skip_reset_segment;
- bool dp_skip_fs_144hz;
- bool dp_mot_reset_segment;
- /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
- bool dpia_mst_dsc_always_on;
- /* Forced DPIA into TBT3 compatibility mode. */
- bool dpia_forced_tbt3_mode;
- bool dongle_mode_timing_override;
- } wa_flags;
- struct link_mst_stream_allocation_table mst_stream_alloc_table;
-
- struct dc_link_status link_status;
- struct dprx_states dprx_states;
-
- struct gpio *hpd_gpio;
- enum dc_link_fec_state fec_state;
- bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly
-
- struct dc_panel_config panel_config;
- struct phy_state phy_state;
-};
-
-
-/**
- * dc_get_link_at_index() - Return an enumerated dc_link.
- *
- * dc_link order is constant and determined at
- * boot time. They cannot be created or destroyed.
- * Use dc_get_caps() to get number of links.
- */
-static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index)
-{
- return dc->links[link_index];
-}
-
-static inline void get_edp_links(const struct dc *dc,
- struct dc_link **edp_links,
- int *edp_num)
-{
- int i;
-
- *edp_num = 0;
- for (i = 0; i < dc->link_count; i++) {
- // report any eDP links, even unconnected DDI's
- if (!dc->links[i])
- continue;
- if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) {
- edp_links[*edp_num] = dc->links[i];
- if (++(*edp_num) == MAX_NUM_EDP)
- return;
- }
- }
-}
-
-static inline bool dc_get_edp_link_panel_inst(const struct dc *dc,
- const struct dc_link *link,
- unsigned int *inst_out)
-{
- struct dc_link *edp_links[MAX_NUM_EDP];
- int edp_num, i;
-
- *inst_out = 0;
- if (link->connector_signal != SIGNAL_TYPE_EDP)
- return false;
- get_edp_links(dc, edp_links, &edp_num);
- for (i = 0; i < edp_num; i++) {
- if (link == edp_links[i])
- break;
- (*inst_out)++;
- }
- return true;
-}
-
-/* Set backlight level of an embedded panel (eDP, LVDS).
- * backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer
- * and 16 bit fractional, where 1.0 is max backlight value.
- */
-bool dc_link_set_backlight_level(const struct dc_link *dc_link,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp);
-
-/* Set/get nits-based backlight level of an embedded panel (eDP, LVDS). */
-bool dc_link_set_backlight_level_nits(struct dc_link *link,
- bool isHDR,
- uint32_t backlight_millinits,
- uint32_t transition_time_in_ms);
-
-bool dc_link_get_backlight_level_nits(struct dc_link *link,
- uint32_t *backlight_millinits,
- uint32_t *backlight_millinits_peak);
-
-int dc_link_get_backlight_level(const struct dc_link *dc_link);
-
-int dc_link_get_target_backlight_pwm(const struct dc_link *link);
-
-bool dc_link_set_psr_allow_active(struct dc_link *dc_link, const bool *enable,
- bool wait, bool force_static, const unsigned int *power_opts);
-
-bool dc_link_get_psr_state(const struct dc_link *dc_link, enum dc_psr_state *state);
-
-bool dc_link_setup_psr(struct dc_link *dc_link,
- const struct dc_stream_state *stream, struct psr_config *psr_config,
- struct psr_context *psr_context);
-
-bool dc_link_get_hpd_state(struct dc_link *dc_link);
-
-/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
- * Return:
- * true - Downstream port status changed. DM should call DC to do the
- * detection.
- * false - no change in Downstream port status. No further action required
- * from DM. */
-bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
- union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss,
- bool defer_handling, bool *has_left_work);
-
-/*
- * On eDP links this function call will stall until T12 has elapsed.
- * If the panel is not in power off state, this function will return
- * immediately.
- */
-bool dc_link_wait_for_t12(struct dc_link *link);
-
-void dc_link_dp_handle_automated_test(struct dc_link *link);
-void dc_link_dp_handle_link_loss(struct dc_link *link);
-bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link);
-bool dc_link_check_link_loss_status(struct dc_link *link,
- union hpd_irq_data *hpd_irq_dpcd_data);
-enum dc_status dc_link_dp_read_hpd_rx_irq_data(
- struct dc_link *link,
- union hpd_irq_data *irq_data);
-struct dc_sink_init_data;
-
-struct dc_sink *dc_link_add_remote_sink(
- struct dc_link *dc_link,
- const uint8_t *edid,
- int len,
- struct dc_sink_init_data *init_data);
-
-void dc_link_remove_remote_sink(
- struct dc_link *link,
- struct dc_sink *sink);
-
-/* Used by diagnostics for virtual link at the moment */
-
-bool dc_link_dp_set_test_pattern(
- struct dc_link *link,
- enum dp_test_pattern test_pattern,
- enum dp_test_pattern_color_space test_pattern_color_space,
- const struct link_training_settings *p_link_settings,
- const unsigned char *p_custom_pattern,
- unsigned int cust_pattern_size);
-
-bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap);
-
-/**
- *****************************************************************************
- * Function: dc_link_enable_hpd_filter
- *
- * @brief
- * If enable is true, programs HPD filter on associated HPD line to default
- * values dependent on link->connector_signal
- *
- * If enable is false, programs HPD filter on associated HPD line with no
- * delays on connect or disconnect
- *
- * @param [in] link: pointer to the dc link
- * @param [in] enable: boolean specifying whether to enable hpd
- *****************************************************************************
- */
-void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
-
-bool dc_link_is_dp_sink_present(struct dc_link *link);
-/*
- * DPCD access interfaces
- */
-
-void dc_link_set_drive_settings(struct dc *dc,
- struct link_training_settings *lt_settings,
- const struct dc_link *link);
-void dc_link_set_preferred_link_settings(struct dc *dc,
- struct dc_link_settings *link_setting,
- struct dc_link *link);
-void dc_link_set_preferred_training_settings(struct dc *dc,
- struct dc_link_settings *link_setting,
- struct dc_link_training_overrides *lt_overrides,
- struct dc_link *link,
- bool skip_immediate_retrain);
-void dc_link_enable_hpd(const struct dc_link *link);
-void dc_link_disable_hpd(const struct dc_link *link);
-void dc_link_set_test_pattern(struct dc_link *link,
- enum dp_test_pattern test_pattern,
- enum dp_test_pattern_color_space test_pattern_color_space,
- const struct link_training_settings *p_link_settings,
- const unsigned char *p_custom_pattern,
- unsigned int cust_pattern_size);
-
-const struct dc_link_settings *dc_link_get_link_cap(
- const struct dc_link *link);
-
-void dc_link_overwrite_extended_receiver_cap(
- struct dc_link *link);
-
-bool dc_is_oem_i2c_device_present(
- struct dc *dc,
- size_t slave_address
-);
-
-bool dc_submit_i2c(
- struct dc *dc,
- uint32_t link_index,
- struct i2c_command *cmd);
-
-bool dc_submit_i2c_oem(
- struct dc *dc,
- struct i2c_command *cmd);
-
-bool dc_link_is_fec_supported(const struct dc_link *link);
-bool dc_link_should_enable_fec(const struct dc_link *link);
-
-uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw);
-enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link);
-
-/* take a snapshot of current link resource allocation state */
-void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map);
-/* restore link resource allocation state from a snapshot */
-void dc_restore_link_res_map(const struct dc *dc, uint32_t *map);
-void dp_trace_reset(struct dc_link *link);
-bool dc_dp_trace_is_initialized(struct dc_link *link);
-unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link,
- bool in_detection);
-void dc_dp_trace_set_is_logged_flag(struct dc_link *link,
- bool in_detection,
- bool is_logged);
-bool dc_dp_trace_is_logged(struct dc_link *link,
- bool in_detection);
-struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
- bool in_detection);
-unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link);
-
-/* Attempt to transfer the given aux payload. This function does not perform
- * retries or handle error states. The reply is returned in the payload->reply
- * and the result through operation_result. Returns the number of bytes
- * transferred, or -1 on a failure.
- */
-int dc_link_aux_transfer_raw(struct ddc_service *ddc,
- struct aux_payload *payload,
- enum aux_return_code_type *operation_result);
-
-enum lttpr_mode dc_link_decide_lttpr_mode(struct dc_link *link,
- struct dc_link_settings *link_setting);
-void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on);
-bool dc_link_decide_edp_link_settings(struct dc_link *link,
- struct dc_link_settings *link_setting,
- uint32_t req_bw);
-void dc_link_edp_panel_backlight_power_on(struct dc_link *link,
- bool wait_for_hpd);
-
-/*
- * USB4 DPIA BW ALLOCATION PUBLIC FUNCTIONS
- */
-/*
- * Send a request from DP-Tx requesting to allocate BW remotely after
- * allocating it locally. This will get processed by CM and a CB function
- * will be called.
- *
- * @link: pointer to the dc_link struct instance
- * @req_bw: The requested bw in Kbyte to allocate
- *
- * return: none
- */
-void dc_link_set_usb4_req_bw_req(struct dc_link *link, int req_bw);
-
-/*
- * CB function for when the status of the Req above is complete. We will
- * find out the result of allocating on CM and update structs accordingly
- *
- * @link: pointer to the dc_link struct instance
- * @bw: Allocated or Estimated BW depending on the result
- * @result: Response type
- *
- * return: none
- */
-void dc_link_get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t result);
-
-/*
- * Handle the USB4 BW Allocation related functionality here:
- * Plug => Try to allocate max bw from timing parameters supported by the sink
- * Unplug => de-allocate bw
- *
- * @link: pointer to the dc_link struct instance
- * @peak_bw: Peak bw used by the link/sink
- *
- * return: allocated bw else return 0
- */
-int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw);
-
-/* TODO: this is not meant to be exposed to DM. Should switch to stream update
- * interface i.e stream_update->dsc_config
- */
-bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx);
-#endif /* DC_LINK_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 27d0242d6cbd..45ab48fe5d00 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -38,10 +38,9 @@
#include "dc_hw_types.h"
#include "dal_types.h"
#include "grph_object_defs.h"
+#include "grph_object_ctrl_defs.h"
-#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "dm_cp_psp.h"
-#endif
/* forward declarations */
struct dc_plane_state;
@@ -812,9 +811,7 @@ struct dc_context {
uint32_t dc_edp_id_count;
uint64_t fbc_gpu_addr;
struct dc_dmub_srv *dmub_srv;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct cp_psp cp_psp;
-#endif
uint32_t *dcn_reg_offsets;
uint32_t *nbio_reg_offsets;
};
@@ -954,7 +951,6 @@ struct dc_link_status {
struct dpcd_caps *dpcd_caps;
};
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
union hdcp_rx_caps {
struct {
uint8_t version;
@@ -981,5 +977,114 @@ struct hdcp_caps {
union hdcp_rx_caps rx_caps;
union hdcp_bcaps bcaps;
};
-#endif
+
+/* DP MST stream allocation (payload bandwidth number) */
+struct link_mst_stream_allocation {
+ /* DIG front */
+ const struct stream_encoder *stream_enc;
+ /* HPO DP Stream Encoder */
+ const struct hpo_dp_stream_encoder *hpo_dp_stream_enc;
+ /* associate DRM payload table with DC stream encoder */
+ uint8_t vcp_id;
+ /* number of slots required for the DP stream in transport packet */
+ uint8_t slot_count;
+};
+
+#define MAX_CONTROLLER_NUM 6
+
+/* DP MST stream allocation table */
+struct link_mst_stream_allocation_table {
+ /* number of DP video streams */
+ int stream_count;
+ /* array of stream allocations */
+ struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
+};
+
+/* PSR feature flags */
+struct psr_settings {
+ bool psr_feature_enabled; // PSR is supported by sink
+ bool psr_allow_active; // PSR is currently active
+ enum dc_psr_version psr_version; // Internal PSR version, determined based on DPCD
+ bool psr_vtotal_control_support; // Vtotal control is supported by sink
+ unsigned long long psr_dirty_rects_change_timestamp_ns; // for delay of enabling PSR-SU
+
+ /* These parameters are calculated in Driver,
+ * based on display timing and Sink capabilities.
+ * If VBLANK region is too small and Sink takes a long time
+ * to set up RFB, it may take an extra frame to enter PSR state.
+ */
+ bool psr_frame_capture_indication_req;
+ unsigned int psr_sdp_transmit_line_num_deadline;
+ uint8_t force_ffu_mode;
+ unsigned int psr_power_opt;
+};
+
+/* To split out "global" and "per-panel" config settings.
+ * Add a struct dc_panel_config under dc_link
+ */
+struct dc_panel_config {
+ /* extra panel power sequence parameters */
+ struct pps {
+ unsigned int extra_t3_ms;
+ unsigned int extra_t7_ms;
+ unsigned int extra_delay_backlight_off;
+ unsigned int extra_post_t7_ms;
+ unsigned int extra_pre_t11_ms;
+ unsigned int extra_t12_ms;
+ unsigned int extra_post_OUI_ms;
+ } pps;
+ /* nit brightness */
+ struct nits_brightness {
+ unsigned int peak; /* nits */
+ unsigned int max_avg; /* nits */
+ unsigned int min; /* 1/10000 nits */
+ unsigned int max_nonboost_brightness_millinits;
+ unsigned int min_brightness_millinits;
+ } nits_brightness;
+ /* PSR */
+ struct psr {
+ bool disable_psr;
+ bool disallow_psrsu;
+ bool rc_disable;
+ bool rc_allow_static_screen;
+ bool rc_allow_fullscreen_VPB;
+ } psr;
+ /* ABM */
+ struct varib {
+ unsigned int varibright_feature_enable;
+ unsigned int def_varibright_level;
+ unsigned int abm_config_setting;
+ } varib;
+ /* edp DSC */
+ struct dsc {
+ bool disable_dsc_edp;
+ unsigned int force_dsc_edp_policy;
+ } dsc;
+ /* eDP ILR */
+ struct ilr {
+ bool optimize_edp_link_rate; /* eDP ILR */
+ } ilr;
+};
+
+/*
+ * USB4 DPIA BW ALLOCATION STRUCTS
+ */
+struct dc_dpia_bw_alloc {
+ int sink_verified_bw; // The verified BW that the sink can allocate and use (already verified)
+ int sink_allocated_bw; // The BW currently allocated to the sink
+ int sink_max_bw; // The Max BW that sink can require/support
+ int estimated_bw; // The estimated available BW for this DPIA
+ int bw_granularity; // BW Granularity
+ bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3: DP-Tx & Dpia & CM
+ bool response_ready; // Response ready from the CM side
+};
+
+#define MAX_SINKS_PER_LINK 4
+
+enum dc_hpd_enable_select {
+ HPD_EN_FOR_ALL_EDP = 0,
+ HPD_EN_FOR_PRIMARY_EDP_ONLY,
+ HPD_EN_FOR_SECONDARY_EDP_ONLY,
+};
+
#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 140297c8ff55..739298d2dff3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -832,13 +832,8 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
LOG_FLAG_I2cAux_DceAux,
"dce_aux_transfer_with_retries: payload->defer_delay=%u",
payload->defer_delay);
- if (payload->defer_delay > 1) {
- msleep(payload->defer_delay);
- defer_time_in_ms += payload->defer_delay;
- } else if (payload->defer_delay <= 1) {
- udelay(payload->defer_delay * 1000);
- defer_time_in_ms += payload->defer_delay;
- }
+ fsleep(payload->defer_delay * 1000);
+ defer_time_in_ms += payload->defer_delay;
}
}
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 165392380842..67e3df7e1b05 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -930,7 +930,13 @@ static bool dce112_program_pix_clk(
REG_WRITE(MODULO[inst], dp_dto_ref_100hz);
/* Enable DTO */
- REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1);
+ if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL)
+ REG_UPDATE_2(PIXEL_RATE_CNTL[inst],
+ DP_DTO0_ENABLE, 1,
+ PIPE0_DTO_SRC_SEL, 1);
+ else
+ REG_UPDATE(PIXEL_RATE_CNTL[inst],
+ DP_DTO0_ENABLE, 1);
return true;
}
/* First disable SS
@@ -995,7 +1001,6 @@ static bool dcn31_program_pix_clk(
REG_WRITE(PHASE[inst], pll_settings->actual_pix_clk_100hz * 100);
REG_WRITE(MODULO[inst], dp_dto_ref_khz * 1000);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Enable DTO */
if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL)
if (encoding == DP_128b_132b_ENCODING)
@@ -1009,9 +1014,6 @@ static bool dcn31_program_pix_clk(
else
REG_UPDATE(PIXEL_RATE_CNTL[inst],
DP_DTO0_ENABLE, 1);
-#else
- REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1);
-#endif
} else {
if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
@@ -1023,7 +1025,6 @@ static bool dcn31_program_pix_clk(
REG_WRITE(MODULO[inst], dp_dto_ref_100hz);
/* Enable DTO */
- #if defined(CONFIG_DRM_AMD_DC_DCN)
if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL)
REG_UPDATE_2(PIXEL_RATE_CNTL[inst],
DP_DTO0_ENABLE, 1,
@@ -1031,17 +1032,12 @@ static bool dcn31_program_pix_clk(
else
REG_UPDATE(PIXEL_RATE_CNTL[inst],
DP_DTO0_ENABLE, 1);
- #else
- REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1);
- #endif
return true;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL)
REG_UPDATE(PIXEL_RATE_CNTL[inst],
PIPE0_DTO_SRC_SEL, 0);
-#endif
/*ATOMBIOS expects pixel rate adjusted by deep color ratio)*/
bp_pc_params.controller_id = pix_clk_params->controller_id;
@@ -1274,7 +1270,14 @@ static bool dcn3_program_pix_clk(
REG_WRITE(PHASE[inst], pll_settings->actual_pix_clk_100hz * 100);
REG_WRITE(MODULO[inst], dp_dto_ref_khz * 1000);
}
- REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1);
+ /* Enable DTO */
+ if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL)
+ REG_UPDATE_2(PIXEL_RATE_CNTL[inst],
+ DP_DTO0_ENABLE, 1,
+ PIPE0_DTO_SRC_SEL, 1);
+ else
+ REG_UPDATE(PIXEL_RATE_CNTL[inst],
+ DP_DTO0_ENABLE, 1);
} else
// For other signal types(HDMI_TYPE_A, DVI) Driver still to call VBIOS Command table
dce112_program_pix_clk(clock_source, pix_clk_params, encoding, pll_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
index aaf33c79b09b..f600b7431e23 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -204,23 +204,17 @@
type DP_DTO0_MODULO; \
type DP_DTO0_ENABLE;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
#define CS_REG_FIELD_LIST_DCN32(type) \
type PIPE0_DTO_SRC_SEL;
-#endif
struct dce110_clk_src_shift {
CS_REG_FIELD_LIST(uint8_t)
-#if defined(CONFIG_DRM_AMD_DC_DCN)
CS_REG_FIELD_LIST_DCN32(uint8_t)
-#endif
};
struct dce110_clk_src_mask{
CS_REG_FIELD_LIST(uint32_t)
-#if defined(CONFIG_DRM_AMD_DC_DCN)
CS_REG_FIELD_LIST_DCN32(uint32_t)
-#endif
};
struct dce110_clk_src_regs {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index d3cc5ec46956..e74266cc0098 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -586,7 +586,7 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
if (state == PSR_STATE0)
break;
}
- udelay(500);
+ fsleep(500);
}
/* assert if max retry hit */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index d9fd4ec60588..670d5ab9d998 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -1009,7 +1009,7 @@ static void dce_transform_set_pixel_storage_depth(
color_depth = COLOR_DEPTH_101010;
pixel_depth = 0;
expan_mode = 1;
- BREAK_TO_DEBUGGER();
+ DC_LOG_DC("The pixel depth %d is not valid, set COLOR_DEPTH_101010 instead.", depth);
break;
}
@@ -1023,8 +1023,7 @@ static void dce_transform_set_pixel_storage_depth(
if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
/*we should use unsupported capabilities
* unless it is required by w/a*/
- DC_LOG_WARNING("%s: Capability not supported",
- __func__);
+ DC_LOG_DC("%s: Capability not supported", __func__);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index fb0dec4ed3a6..9fc48208c2e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -148,7 +148,7 @@ static bool dmub_abm_set_level(struct abm *abm, uint32_t level)
int edp_num;
uint8_t panel_mask = 0;
- get_edp_links(dc->dc, edp_links, &edp_num);
+ dc_get_edp_links(dc->dc, edp_links, &edp_num);
for (i = 0; i < edp_num; i++) {
if (edp_links[i]->link_status.link_active)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 1e2d2cbe2c37..19440bdf6344 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -215,7 +215,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait, uint8
break;
}
- udelay(500);
+ fsleep(500);
}
/* assert if max retry hit */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
index 74005b9d352a..289e42070ece 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
@@ -26,8 +26,9 @@
#ifndef _DMUB_PSR_H_
#define _DMUB_PSR_H_
-#include "os_types.h"
-#include "dc_link.h"
+#include "dc_types.h"
+struct dc_link;
+struct dmub_psr_funcs;
struct dmub_psr {
struct dc_context *ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 0d4d3d586166..9fe0ce91db00 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -741,7 +741,7 @@ void dce110_edp_wait_for_hpd_ready(
/* obtain HPD */
/* TODO what to do with this? */
- hpd = link_get_hpd_gpio(ctx->dc_bios, connector, ctx->gpio_service);
+ hpd = ctx->dc->link_srv->get_hpd_gpio(ctx->dc_bios, connector, ctx->gpio_service);
if (!hpd) {
BREAK_TO_DEBUGGER();
@@ -809,19 +809,19 @@ void dce110_edp_power_control(
div64_u64(dm_get_elapse_time_in_ns(
ctx,
current_ts,
- link_dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
+ ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
unsigned long long time_since_edp_poweron_ms =
div64_u64(dm_get_elapse_time_in_ns(
ctx,
current_ts,
- link_dp_trace_get_edp_poweron_timestamp(link)), 1000000);
+ ctx->dc->link_srv->dp_trace_get_edp_poweron_timestamp(link)), 1000000);
DC_LOG_HW_RESUME_S3(
"%s: transition: power_up=%d current_ts=%llu edp_poweroff=%llu edp_poweron=%llu time_since_edp_poweroff_ms=%llu time_since_edp_poweron_ms=%llu",
__func__,
power_up,
current_ts,
- link_dp_trace_get_edp_poweroff_timestamp(link),
- link_dp_trace_get_edp_poweron_timestamp(link),
+ ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link),
+ ctx->dc->link_srv->dp_trace_get_edp_poweron_timestamp(link),
time_since_edp_poweroff_ms,
time_since_edp_poweron_ms);
@@ -836,7 +836,7 @@ void dce110_edp_power_control(
link->panel_config.pps.extra_t12_ms;
/* Adjust remaining_min_edp_poweroff_time_ms if this is not the first time. */
- if (link_dp_trace_get_edp_poweroff_timestamp(link) != 0) {
+ if (ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link) != 0) {
if (time_since_edp_poweroff_ms < remaining_min_edp_poweroff_time_ms)
remaining_min_edp_poweroff_time_ms =
remaining_min_edp_poweroff_time_ms - time_since_edp_poweroff_ms;
@@ -896,13 +896,13 @@ void dce110_edp_power_control(
__func__, (power_up ? "On":"Off"),
bp_result);
- link_dp_trace_set_edp_power_timestamp(link, power_up);
+ ctx->dc->link_srv->dp_trace_set_edp_power_timestamp(link, power_up);
DC_LOG_HW_RESUME_S3(
"%s: updated values: edp_poweroff=%llu edp_poweron=%llu\n",
__func__,
- link_dp_trace_get_edp_poweroff_timestamp(link),
- link_dp_trace_get_edp_poweron_timestamp(link));
+ ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link),
+ ctx->dc->link_srv->dp_trace_get_edp_poweron_timestamp(link));
if (bp_result != BP_RESULT_OK)
DC_LOG_ERROR(
@@ -930,14 +930,14 @@ void dce110_edp_wait_for_T12(
return;
if (!link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl) &&
- link_dp_trace_get_edp_poweroff_timestamp(link) != 0) {
+ ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link) != 0) {
unsigned int t12_duration = 500; // Default T12 as per spec
unsigned long long current_ts = dm_get_timestamp(ctx);
unsigned long long time_since_edp_poweroff_ms =
div64_u64(dm_get_elapse_time_in_ns(
ctx,
current_ts,
- link_dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
+ ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
t12_duration += link->panel_config.pps.extra_t12_ms; // Add extra T12
@@ -1018,7 +1018,7 @@ void dce110_edp_backlight_control(
* we shouldn't be doing power-sequencing, hence we can skip
* waiting for T7-ready.
*/
- link_edp_receiver_ready_T7(link);
+ ctx->dc->link_srv->edp_receiver_ready_T7(link);
else
DC_LOG_DC("edp_receiver_ready_T7 skipped\n");
}
@@ -1049,7 +1049,7 @@ void dce110_edp_backlight_control(
if (link->dpcd_sink_ext_caps.bits.oled ||
link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)
- link_backlight_enable_aux(link, enable);
+ ctx->dc->link_srv->edp_backlight_enable_aux(link, enable);
/*edp 1.2*/
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_OFF) {
@@ -1061,7 +1061,7 @@ void dce110_edp_backlight_control(
* we shouldn't be doing power-sequencing, hence we can skip
* waiting for T9-ready.
*/
- link_edp_add_delay_for_T9(link);
+ ctx->dc->link_srv->edp_add_delay_for_T9(link);
else
DC_LOG_DC("edp_receiver_ready_T9 skipped\n");
}
@@ -1161,7 +1161,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc);
}
- if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->stop_dp_info_packets(
pipe_ctx->stream_res.hpo_dp_stream_enc);
} else if (dc_is_dp_signal(pipe_ctx->stream->signal))
@@ -1172,7 +1172,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
link_hwss->reset_stream_encoder(pipe_ctx);
- if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
dto_params.otg_inst = tg->inst;
dto_params.timing = &pipe_ctx->stream->timing;
dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
@@ -1181,7 +1181,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
}
- if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
/* TODO: This looks like a bug to me as we are disabling HPO IO when
* we are just disabling a single HPO stream. Shouldn't we disable HPO
* HW control only when HPOs for all streams are disabled?
@@ -1223,7 +1223,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
}
- if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_blank(
pipe_ctx->stream_res.hpo_dp_stream_enc);
@@ -1245,7 +1245,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
* we shouldn't be doing power-sequencing, hence we can skip
* waiting for T9-ready.
*/
- link_edp_receiver_ready_T9(link);
+ link->dc->link_srv->edp_receiver_ready_T9(link);
}
}
}
@@ -1428,7 +1428,7 @@ static enum dc_status dce110_enable_stream_timing(
if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
- link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+ dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings)) {
BREAK_TO_DEBUGGER();
return DC_ERROR_UNEXPECTED;
@@ -1532,7 +1532,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
* To do so, move calling function enable_stream_timing to only be done AFTER calling
* function core_link_enable_stream
*/
- if (!(hws->wa.dp_hpo_and_otg_sequence && link_is_dp_128b_132b_signal(pipe_ctx)))
+ if (!(hws->wa.dp_hpo_and_otg_sequence && dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)))
/* */
/* Do not touch stream timing on seamless boot optimization. */
if (!pipe_ctx->stream->apply_seamless_boot_optimization)
@@ -1564,17 +1564,17 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.tg->inst);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
+ dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
if (!stream->dpms_off)
- link_set_dpms_on(context, pipe_ctx);
+ dc->link_srv->set_dpms_on(context, pipe_ctx);
/* DCN3.1 FPGA Workaround
* Need to enable HPO DP Stream Encoder before setting OTG master enable.
* To do so, move calling function enable_stream_timing to only be done AFTER calling
* function core_link_enable_stream
*/
- if (hws->wa.dp_hpo_and_otg_sequence && link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (hws->wa.dp_hpo_and_otg_sequence && dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
if (!pipe_ctx->stream->apply_seamless_boot_optimization)
hws->funcs.enable_stream_timing(pipe_ctx, context, dc);
}
@@ -1600,7 +1600,7 @@ static void power_down_encoders(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
enum signal_type signal = dc->links[i]->connector_signal;
- link_blank_dp_stream(dc->links[i], false);
+ dc->link_srv->blank_dp_stream(dc->links[i], false);
if (signal != SIGNAL_TYPE_EDP)
signal = SIGNAL_TYPE_NONE;
@@ -1739,7 +1739,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
get_edp_links_with_sink(dc, edp_links_with_sink, &edp_with_sink_num);
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (hws->funcs.init_pipes)
hws->funcs.init_pipes(dc, context);
@@ -2083,7 +2083,7 @@ static void dce110_reset_hw_ctx_wrap(
* disabled already, no need to disable again.
*/
if (!pipe_ctx->stream || !pipe_ctx->stream->dpms_off) {
- link_set_dpms_off(pipe_ctx_old);
+ dc->link_srv->set_dpms_off(pipe_ctx_old);
/* free acquired resources*/
if (pipe_ctx_old->stream_res.audio) {
@@ -3054,13 +3054,13 @@ void dce110_enable_dp_link_output(
pipes[i].clock_source->funcs->program_pix_clk(
pipes[i].clock_source,
&pipes[i].stream_res.pix_clk_params,
- link_dp_get_encoding_format(link_settings),
+ dc->link_srv->dp_get_encoding_format(link_settings),
&pipes[i].pll_settings);
}
}
}
- if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
+ if (dc->link_srv->dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
if (dc->clk_mgr->funcs->notify_link_rate_change)
dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
}
@@ -3077,7 +3077,7 @@ void dce110_enable_dp_link_output(
if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
+ dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
}
void dce110_disable_link_output(struct dc_link *link,
@@ -3102,7 +3102,7 @@ void dce110_disable_link_output(struct dc_link *link,
link->dc->hwss.edp_power_control(link, false);
else if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->unlock_phy(dmcu);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+ dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}
static const struct hw_sequencer_funcs dce110_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index 394d83a97f33..08028a1779ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -71,8 +71,6 @@ void dce110_optimize_bandwidth(
struct dc *dc,
struct dc_state *context);
-void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on);
-
void dce110_edp_power_control(
struct dc_link *link,
bool power_up);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index f808315b2835..a4a45a6ce61e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -401,8 +401,6 @@ static const struct resource_caps stoney_resource_cap = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCE_RGB,
- .blends_with_below = true,
- .blends_with_above = true,
.per_pixel_alpha = 1,
.pixel_format_support = {
@@ -428,7 +426,6 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_plane_cap underlay_plane_cap = {
.type = DC_PLANE_TYPE_DCE_UNDERLAY,
- .blends_with_above = true,
.per_pixel_alpha = 1,
.pixel_format_support = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index 71b3a6949001..c9e045666dcc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -59,6 +59,7 @@
SRI(LB_DATA_FORMAT, DSCL, id), \
SRI(LB_MEMORY_CTRL, DSCL, id), \
SRI(DSCL_AUTOCAL, DSCL, id), \
+ SRI(DSCL_CONTROL, DSCL, id), \
SRI(SCL_BLACK_OFFSET, DSCL, id), \
SRI(SCL_TAP_CONTROL, DSCL, id), \
SRI(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \
@@ -209,6 +210,7 @@
TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_MODE, mask_sh),\
TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_NUM_PIPE, mask_sh),\
TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_PIPE_ID, mask_sh),\
+ TF_SF(DSCL0_DSCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh),\
TF_SF(DSCL0_SCL_BLACK_OFFSET, SCL_BLACK_OFFSET_RGB_Y, mask_sh),\
TF_SF(DSCL0_SCL_BLACK_OFFSET, SCL_BLACK_OFFSET_CBCR, mask_sh),\
TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS, mask_sh),\
@@ -495,6 +497,7 @@
type AUTOCAL_MODE; \
type AUTOCAL_NUM_PIPE; \
type AUTOCAL_PIPE_ID; \
+ type SCL_BOUNDARY_MODE; \
type SCL_BLACK_OFFSET_RGB_Y; \
type SCL_BLACK_OFFSET_CBCR; \
type SCL_V_NUM_TAPS; \
@@ -1108,6 +1111,7 @@ struct dcn_dpp_mask {
uint32_t LB_DATA_FORMAT; \
uint32_t LB_MEMORY_CTRL; \
uint32_t DSCL_AUTOCAL; \
+ uint32_t DSCL_CONTROL; \
uint32_t SCL_BLACK_OFFSET; \
uint32_t SCL_TAP_CONTROL; \
uint32_t SCL_COEF_RAM_TAP_SELECT; \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index f62368da875d..b33955928bd0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -655,6 +655,10 @@ void dpp1_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
AUTOCAL_NUM_PIPE, 0,
AUTOCAL_PIPE_ID, 0);
+ /* clear scaler boundary mode when Autocal is off */
+ REG_SET(DSCL_CONTROL, 0,
+ SCL_BOUNDARY_MODE, 0);
+
/* Recout */
dpp1_dscl_set_recout(dpp, &scl_data->recout);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c
index b6391a5ead78..365a3215f6d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c
@@ -23,8 +23,6 @@
*
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-
#include "reg_helper.h"
#include "resource.h"
#include "dwb.h"
@@ -129,6 +127,3 @@ void dcn10_dwbc_construct(struct dcn10_dwbc *dwbc10,
dwbc10->dwbc_shift = dwbc_shift;
dwbc10->dwbc_mask = dwbc_mask;
}
-
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h
index d56ea7c8171e..5268c46ae907 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h
@@ -24,8 +24,6 @@
#ifndef __DC_DWBC_DCN10_H__
#define __DC_DWBC_DCN10_H__
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-
/* DCN */
#define BASE_INNER(seg) \
DCE_BASE__INST0_SEG ## seg
@@ -267,5 +265,3 @@ void dcn10_dwbc_construct(struct dcn10_dwbc *dwbc10,
int inst);
#endif
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index a1a29c508394..7f9cceb49f4e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -919,7 +919,7 @@ enum dc_status dcn10_enable_stream_timing(
if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
- link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+ dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings)) {
BREAK_TO_DEBUGGER();
return DC_ERROR_UNEXPECTED;
@@ -1017,7 +1017,7 @@ static void dcn10_reset_back_end_for_pipe(
* VBIOS lit up eDP, so check link status too.
*/
if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
- link_set_dpms_off(pipe_ctx);
+ dc->link_srv->set_dpms_off(pipe_ctx);
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
@@ -1564,7 +1564,7 @@ void dcn10_init_hw(struct dc *dc)
}
/* we want to turn off all dp displays before doing detection */
- link_blank_all_dp_displays(dc);
+ dc->link_srv->blank_all_dp_displays(dc);
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
@@ -1638,7 +1638,7 @@ void dcn10_power_down_on_boot(struct dc *dc)
int edp_num;
int i = 0;
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num)
edp_link = edp_links[0];
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index c4287147b853..ee08b545aaea 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -1219,7 +1219,6 @@ void dcn10_link_encoder_update_mst_stream_allocation_table(
const struct link_mst_stream_allocation_table *table)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- uint32_t value0 = 0;
uint32_t value1 = 0;
uint32_t value2 = 0;
uint32_t slots = 0;
@@ -1321,7 +1320,7 @@ void dcn10_link_encoder_update_mst_stream_allocation_table(
do {
udelay(10);
- value0 = REG_READ(DP_MSE_SAT_UPDATE);
+ REG_READ(DP_MSE_SAT_UPDATE);
REG_GET(DP_MSE_SAT_UPDATE,
DP_MSE_SAT_UPDATE, &value1);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 6bfac8088ab0..2bb8e11f26e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -504,8 +504,6 @@ static const struct resource_caps rv2_res_cap = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 3c451ab5d3ca..f496e952ceec 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -933,7 +933,7 @@ void enc1_stream_encoder_dp_blank(
/* disable DP stream */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_DP_VID_STREAM);
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_DP_VID_STREAM);
/* the encoder stops sending the video stream
* at the start of the vertical blanking.
@@ -952,7 +952,7 @@ void enc1_stream_encoder_dp_blank(
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_FIFO_STEER_RESET);
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_FIFO_STEER_RESET);
}
/* output video stream to link encoder */
@@ -1025,7 +1025,8 @@ void enc1_stream_encoder_dp_unblank(
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
void enc1_stream_encoder_set_avmute(
@@ -1470,10 +1471,9 @@ void enc1_se_hdmi_audio_setup(
void enc1_se_hdmi_audio_disable(
struct stream_encoder *enc)
{
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (enc->afmt && enc->afmt->funcs->afmt_powerdown)
enc->afmt->funcs->afmt_powerdown(enc->afmt);
-#endif
+
enc1_se_enable_audio_clock(enc, false);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 42344aec60d6..5bd698cd6d20 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -50,7 +50,7 @@ static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe);
static void dsc2_disable(struct display_stream_compressor *dsc);
static void dsc2_disconnect(struct display_stream_compressor *dsc);
-const struct dsc_funcs dcn20_dsc_funcs = {
+static const struct dsc_funcs dcn20_dsc_funcs = {
.dsc_get_enc_caps = dsc2_get_enc_caps,
.dsc_read_state = dsc2_read_state,
.dsc_validate_stream = dsc2_validate_stream,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
index f1490e97b6ce..f8667be57046 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
@@ -301,7 +301,7 @@ void dwb2_set_scaler(struct dwbc *dwbc, struct dc_dwb_params *params)
}
-const struct dwbc_funcs dcn20_dwbc_funcs = {
+static const struct dwbc_funcs dcn20_dwbc_funcs = {
.get_caps = dwb2_get_caps,
.enable = dwb2_enable,
.disable = dwb2_disable,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index b83873a3a534..53669f832ba5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -190,10 +190,15 @@ void dcn20_enable_power_gating_plane(
bool enable)
{
bool force_on = true; /* disable power gating */
+ uint32_t org_ip_request_cntl = 0;
if (enable)
force_on = false;
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
+
/* DCHUBP0/1/2/3/4/5 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
@@ -224,6 +229,10 @@ void dcn20_enable_power_gating_plane(
REG_UPDATE(DOMAIN20_PG_CONFIG, DOMAIN20_POWER_FORCEON, force_on);
if (REG(DOMAIN21_PG_CONFIG))
REG_UPDATE(DOMAIN21_PG_CONFIG, DOMAIN21_POWER_FORCEON, force_on);
+
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
+
}
void dcn20_dccg_init(struct dce_hwseq *hws)
@@ -711,7 +720,7 @@ enum dc_status dcn20_enable_stream_timing(
if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
- link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+ dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings)) {
BREAK_TO_DEBUGGER();
return DC_ERROR_UNEXPECTED;
@@ -2396,7 +2405,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
params.link_settings.link_rate = link_settings->link_rate;
- if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
pipe_ctx->stream_res.hpo_dp_stream_enc,
@@ -2449,7 +2458,7 @@ static void dcn20_reset_back_end_for_pipe(
* VBIOS lit up eDP, so check link status too.
*/
if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
- link_set_dpms_off(pipe_ctx);
+ dc->link_srv->set_dpms_off(pipe_ctx);
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
@@ -2469,7 +2478,7 @@ static void dcn20_reset_back_end_for_pipe(
}
}
else if (pipe_ctx->stream_res.dsc) {
- link_set_dsc_enable(pipe_ctx, false);
+ dc->link_srv->set_dsc_enable(pipe_ctx, false);
}
/* by upper caller loop, parent pipe: pipe0, will be reset last.
@@ -2704,12 +2713,12 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
unsigned int k1_div = PIXEL_RATE_DIV_NA;
unsigned int k2_div = PIXEL_RATE_DIV_NA;
- if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
if (dc->hwseq->funcs.setup_hpo_hw_control)
dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, true);
}
- if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
@@ -2743,7 +2752,7 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
dc->hwss.update_info_frame(pipe_ctx);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+ dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
/* enable early control to avoid corruption on DP monitor*/
active_total_with_borders =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c
index ccd91792991b..259a98e4ee2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c
@@ -297,7 +297,7 @@ void mcifwb2_dump_frame(struct mcif_wb *mcif_wb,
dump_info->size = dest_height * (mcif_params->luma_pitch + mcif_params->chroma_pitch);
}
-const struct mcif_wb_funcs dcn20_mmhubbub_funcs = {
+static const struct mcif_wb_funcs dcn20_mmhubbub_funcs = {
.enable_mcif = mmhubbub2_enable_mcif,
.disable_mcif = mmhubbub2_disable_mcif,
.config_mcif_buf = mmhubbub2_config_mcif_buf,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 116f67a0b989..5da6e44f284a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -542,7 +542,7 @@ static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
return NULL;
}
-const struct mpc_funcs dcn20_mpc_funcs = {
+static const struct mpc_funcs dcn20_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 3af24ef9cb2d..77ef474ced07 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -670,8 +670,6 @@ static const struct resource_caps res_cap_nv10 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
@@ -1213,8 +1211,11 @@ static void dcn20_resource_destruct(struct dcn20_resource_pool *pool)
if (pool->base.pp_smu != NULL)
dcn20_pp_smu_destroy(&pool->base.pp_smu);
- if (pool->base.oem_device != NULL)
- link_destroy_ddc_service(&pool->base.oem_device);
+ if (pool->base.oem_device != NULL) {
+ struct dc *dc = pool->base.oem_device->ctx->dc;
+
+ dc->link_srv->destroy_ddc_service(&pool->base.oem_device);
+ }
}
struct hubp *dcn20_hubp_create(
@@ -2765,7 +2766,7 @@ static bool dcn20_resource_construct(
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
- pool->base.oem_device = link_create_ddc_service(&ddc_init_data);
+ pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
} else {
pool->base.oem_device = NULL;
}
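The resource construct/destruct hunks above stop calling link_create_ddc_service()/link_destroy_ddc_service() directly: creation goes through dc->link_srv, and teardown first recovers the dc pointer from the oem_device's context. A hedged sketch of that ownership flow follows; the types are simplified stand-ins (the real create call takes a ddc_service_init_data, not a bare context pointer).

/* Hedged sketch of the oem_device create/destroy flow through the link
 * service, mirroring the resource hunks above. */
#include <stdio.h>
#include <stdlib.h>

struct dc;
struct dc_context { struct dc *dc; };
struct ddc_service { struct dc_context *ctx; };

struct link_service {
	struct ddc_service *(*create_ddc_service)(struct dc_context *ctx);
	void (*destroy_ddc_service)(struct ddc_service **ddc);
};

struct dc { const struct link_service *link_srv; struct dc_context ctx; };

static struct ddc_service *create_impl(struct dc_context *ctx)
{
	struct ddc_service *ddc = malloc(sizeof(*ddc));

	if (ddc)
		ddc->ctx = ctx;
	return ddc;
}

static void destroy_impl(struct ddc_service **ddc)
{
	free(*ddc);
	*ddc = NULL;                         /* callers test the pointer against NULL */
}

static const struct link_service link_srv_impl = {
	.create_ddc_service = create_impl,
	.destroy_ddc_service = destroy_impl,
};

int main(void)
{
	struct dc dc = { .link_srv = &link_srv_impl };
	struct ddc_service *oem_device;

	dc.ctx.dc = &dc;
	oem_device = dc.link_srv->create_ddc_service(&dc.ctx);

	if (oem_device != NULL) {
		/* recover dc from the device's context, then destroy through
		 * the service, as in the destruct hunks above */
		struct dc *owner = oem_device->ctx->dc;

		owner->link_srv->destroy_ddc_service(&oem_device);
	}
	printf("oem_device=%p\n", (void *)oem_device);
	return 0;
}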
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index 42865d6c0cdd..0b47aeb60e79 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -546,7 +546,8 @@ void enc2_stream_encoder_dp_unblank(
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
static void enc2_dp_set_odm_combine(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
index f50ab961bc17..a7268027a472 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
@@ -185,13 +185,6 @@ static bool dpp201_get_optimal_number_of_taps(
struct scaler_data *scl_data,
const struct scaling_taps *in_taps)
{
- uint32_t pixel_width;
-
- if (scl_data->viewport.width > scl_data->recout.width)
- pixel_width = scl_data->recout.width;
- else
- pixel_width = scl_data->viewport.width;
-
if (scl_data->viewport.width != scl_data->h_active &&
scl_data->viewport.height != scl_data->v_active &&
dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
index 61bcfa03c4e7..1aeb04fbd89d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
@@ -541,8 +541,6 @@ void dcn201_pipe_control_lock(
bool lock)
{
struct dce_hwseq *hws = dc->hwseq;
- struct hubp *hubp = NULL;
- hubp = dc->res_pool->hubps[pipe->pipe_idx];
/* use TG master update lock to lock everything on the TG
* therefore only top pipe need to lock
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c
index 95c4c55f067c..1af03a86ec9b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c
@@ -76,7 +76,7 @@ static void mpc201_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
mpcc->shared_bottom = false;
}
-const struct mpc_funcs dcn201_mpc_funcs = {
+static const struct mpc_funcs dcn201_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
index 407d995bfa99..6ea70da28aaa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
@@ -74,7 +74,7 @@
#define MIN_DISP_CLK_KHZ 100000
#define MIN_DPP_CLK_KHZ 100000
-struct _vcs_dpi_ip_params_st dcn201_ip = {
+static struct _vcs_dpi_ip_params_st dcn201_ip = {
.gpuvm_enable = 0,
.hostvm_enable = 0,
.gpuvm_max_page_table_levels = 4,
@@ -136,7 +136,7 @@ struct _vcs_dpi_ip_params_st dcn201_ip = {
.number_of_cursors = 1,
};
-struct _vcs_dpi_soc_bounding_box_st dcn201_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn201_soc = {
.clock_limits = {
{
.state = 0,
@@ -571,8 +571,6 @@ static const struct resource_caps res_cap_dnc201 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
index 15475c7e2cf9..2a182c2f57d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -132,8 +132,8 @@ void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx)
return;
pipe_ctx->stream->dpms_off = false;
- link_set_dpms_on(context, pipe_ctx);
- link_set_dpms_off(pipe_ctx);
+ pipe_ctx->stream->ctx->dc->link_srv->set_dpms_on(context, pipe_ctx);
+ pipe_ctx->stream->ctx->dc->link_srv->set_dpms_off(pipe_ctx);
pipe_ctx->stream->dpms_off = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 8f9244fe5c86..19aaa557b2db 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -609,8 +609,6 @@ static const struct resource_caps res_cap_rn_FPGA_2pipe_dsc = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
@@ -642,7 +640,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.clock_trace = true,
.disable_pplib_clock_request = true,
.min_disp_clk_khz = 100000,
- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c
index 95528e5ef89e..55e388c4c98b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c
@@ -123,7 +123,6 @@ void afmt3_se_audio_setup(
{
struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
- uint32_t speakers = 0;
uint32_t channels = 0;
ASSERT(audio_info);
@@ -131,7 +130,6 @@ void afmt3_se_audio_setup(
if (audio_info == NULL)
return;
- speakers = audio_info->flags.info.ALLSPEAKERS;
channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
/* setup the audio stream source select (audio -> dig mapping) */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
index 5f9079d3943a..9d08127d209b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
@@ -28,6 +28,7 @@
#include "dcn30_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
+#include "dc.h"
#include "core_types.h"
#include <linux/delay.h>
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
index 6263408d71fc..2082372d69ee 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
@@ -102,6 +102,7 @@
SRI(LB_DATA_FORMAT, DSCL, id), \
SRI(LB_MEMORY_CTRL, DSCL, id), \
SRI(DSCL_AUTOCAL, DSCL, id), \
+ SRI(DSCL_CONTROL, DSCL, id), \
SRI(SCL_TAP_CONTROL, DSCL, id), \
SRI(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \
SRI(SCL_COEF_RAM_TAP_DATA, DSCL, id), \
@@ -237,6 +238,7 @@
TF_SF(DSCL0_LB_MEMORY_CTRL, LB_MAX_PARTITIONS, mask_sh),\
TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_MODE, mask_sh),\
TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_NUM_PIPE, mask_sh),\
+ TF_SF(DSCL0_DSCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh),\
TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_PIPE_ID, mask_sh),\
TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS, mask_sh),\
TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
index f14f69616692..0d98918bf0fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
@@ -220,7 +220,7 @@ void dwb3_set_denorm(struct dwbc *dwbc, struct dc_dwb_params *params)
}
-const struct dwbc_funcs dcn30_dwbc_funcs = {
+static const struct dwbc_funcs dcn30_dwbc_funcs = {
.get_caps = dwb3_get_caps,
.enable = dwb3_enable,
.disable = dwb3_disable,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index dc3e8df706b3..e46bbe7ddcc9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -47,13 +47,9 @@ void hubp3_set_vm_system_aperture_settings(struct hubp *hubp,
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
- PHYSICAL_ADDRESS_LOC mc_vm_apt_default;
PHYSICAL_ADDRESS_LOC mc_vm_apt_low;
PHYSICAL_ADDRESS_LOC mc_vm_apt_high;
- // The format of default addr is 48:12 of the 48 bit addr
- mc_vm_apt_default.quad_part = apt->sys_default.quad_part >> 12;
-
// The format of high/low are 48:18 of the 48 bit addr
mc_vm_apt_low.quad_part = apt->sys_low.quad_part >> 18;
mc_vm_apt_high.quad_part = apt->sys_high.quad_part >> 18;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index 3b4d4d68359b..6d328b7e07a8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -323,13 +323,10 @@ void dcn30_enable_writeback(
{
struct dwbc *dwb;
struct mcif_wb *mcif_wb;
- struct timing_generator *optc;
dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
- /* set the OPTC source mux */
- optc = dc->res_pool->timing_generators[dwb->otg_inst];
DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\
__func__, wb_info->dwb_pipe_inst,\
wb_info->mpcc_inst);
@@ -534,13 +531,8 @@ void dcn30_init_hw(struct dc *dc)
}
}
- /* Power gate DSCs */
- for (i = 0; i < res_pool->res_cap->num_dsc; i++)
- if (hws->funcs.dsc_pg_control != NULL)
- hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
-
/* we want to turn off all dp displays before doing detection */
- link_blank_all_dp_displays(dc);
+ dc->link_srv->blank_all_dp_displays(dc);
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
@@ -567,7 +559,7 @@ void dcn30_init_hw(struct dc *dc)
struct dc_link *edp_links[MAX_NUM_EDP];
struct dc_link *edp_link = NULL;
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num)
edp_link = edp_links[0];
if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
@@ -998,8 +990,5 @@ void dcn30_prepare_bandwidth(struct dc *dc,
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
dcn20_prepare_bandwidth(dc, context);
-
- dc_dmub_srv_p_state_delegate(dc,
- context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, context);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c
index 7a93eff183d9..6f2a0d5d963b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c
@@ -211,7 +211,7 @@ static void mmhubbub3_config_mcif_arb(struct mcif_wb *mcif_wb,
REG_UPDATE(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_CLIENT_ARBITRATION_SLICE, params->arbitration_slice);
}
-const struct mcif_wb_funcs dcn30_mmhubbub_funcs = {
+static const struct mcif_wb_funcs dcn30_mmhubbub_funcs = {
.warmup_mcif = mmhubbub3_warmup_mcif,
.enable_mcif = mmhubbub2_enable_mcif,
.disable_mcif = mmhubbub2_disable_mcif,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
index ad1c1b703874..6cf40c1332bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
@@ -1399,7 +1399,7 @@ static void mpc3_set_mpc_mem_lp_mode(struct mpc *mpc)
}
}
-const struct mpc_funcs dcn30_mpc_funcs = {
+static const struct mpc_funcs dcn30_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index b5b5320c7bef..c9e45da6ccd1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -680,8 +680,6 @@ static const struct resource_caps res_cap_dcn3 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
@@ -1207,8 +1205,11 @@ static void dcn30_resource_destruct(struct dcn30_resource_pool *pool)
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
- if (pool->base.oem_device != NULL)
- link_destroy_ddc_service(&pool->base.oem_device);
+ if (pool->base.oem_device != NULL) {
+ struct dc *dc = pool->base.oem_device->ctx->dc;
+
+ dc->link_srv->destroy_ddc_service(&pool->base.oem_device);
+ }
}
static struct hubp *dcn30_hubp_create(
@@ -2592,7 +2593,7 @@ static bool dcn30_resource_construct(
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
- pool->base.oem_device = link_create_ddc_service(&ddc_init_data);
+ pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
} else {
pool->base.oem_device = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index ee62ae3eb98f..b93b4498dba4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -651,8 +651,6 @@ static struct resource_caps res_cap_dcn301 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 03ddf4f5f065..9f93c43115ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -147,8 +147,6 @@ static const struct resource_caps res_cap_dcn302 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
.argb8888 = true,
@@ -1127,8 +1125,11 @@ static void dcn302_resource_destruct(struct resource_pool *pool)
if (pool->dccg != NULL)
dcn_dccg_destroy(&pool->dccg);
- if (pool->oem_device != NULL)
- link_destroy_ddc_service(&pool->oem_device);
+ if (pool->oem_device != NULL) {
+ struct dc *dc = pool->oem_device->ctx->dc;
+
+ dc->link_srv->destroy_ddc_service(&pool->oem_device);
+ }
}
static void dcn302_destroy_resource_pool(struct resource_pool **pool)
@@ -1508,7 +1509,7 @@ static bool dcn302_resource_construct(
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
- pool->oem_device = link_create_ddc_service(&ddc_init_data);
+ pool->oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
} else {
pool->oem_device = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 31e212064168..7f72ef882ca4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -126,8 +126,6 @@ static const struct resource_caps res_cap_dcn303 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
.argb8888 = true,
@@ -1053,8 +1051,11 @@ static void dcn303_resource_destruct(struct resource_pool *pool)
if (pool->dccg != NULL)
dcn_dccg_destroy(&pool->dccg);
- if (pool->oem_device != NULL)
- link_destroy_ddc_service(&pool->oem_device);
+ if (pool->oem_device != NULL) {
+ struct dc *dc = pool->oem_device->ctx->dc;
+
+ dc->link_srv->destroy_ddc_service(&pool->oem_device);
+ }
}
static void dcn303_destroy_resource_pool(struct resource_pool **pool)
@@ -1163,7 +1164,6 @@ static bool dcn303_resource_construct(
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
dc->caps.mall_size_per_mem_channel = 4;
/* total size = mall per channel * num channels * 1024 * 1024 */
dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel *
@@ -1171,7 +1171,6 @@ static bool dcn303_resource_construct(
1024 * 1024;
dc->caps.cursor_cache_size =
dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
-#endif
dc->caps.max_slave_planes = 1;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
@@ -1421,7 +1420,7 @@ static bool dcn303_resource_construct(
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
- pool->oem_device = link_create_ddc_service(&ddc_init_data);
+ pool->oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
} else {
pool->oem_device = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
index 24e9ff65434d..05aac3e444b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
@@ -72,40 +72,6 @@ static void apg31_disable(
REG_UPDATE(APG_CONTROL2, APG_ENABLE, 0);
}
-static union audio_cea_channels speakers_to_channels(
- struct audio_speaker_flags speaker_flags)
-{
- union audio_cea_channels cea_channels = {0};
-
- /* these are one to one */
- cea_channels.channels.FL = speaker_flags.FL_FR;
- cea_channels.channels.FR = speaker_flags.FL_FR;
- cea_channels.channels.LFE = speaker_flags.LFE;
- cea_channels.channels.FC = speaker_flags.FC;
-
- /* if Rear Left and Right exist move RC speaker to channel 7
- * otherwise to channel 5
- */
- if (speaker_flags.RL_RR) {
- cea_channels.channels.RL_RC = speaker_flags.RL_RR;
- cea_channels.channels.RR = speaker_flags.RL_RR;
- cea_channels.channels.RC_RLC_FLC = speaker_flags.RC;
- } else {
- cea_channels.channels.RL_RC = speaker_flags.RC;
- }
-
- /* FRONT Left Right Center and REAR Left Right Center are exclusive */
- if (speaker_flags.FLC_FRC) {
- cea_channels.channels.RC_RLC_FLC = speaker_flags.FLC_FRC;
- cea_channels.channels.RRC_FRC = speaker_flags.FLC_FRC;
- } else {
- cea_channels.channels.RC_RLC_FLC = speaker_flags.RLC_RRC;
- cea_channels.channels.RRC_FRC = speaker_flags.RLC_RRC;
- }
-
- return cea_channels;
-}
-
static void apg31_se_audio_setup(
struct apg *apg,
unsigned int az_inst,
@@ -113,24 +79,17 @@ static void apg31_se_audio_setup(
{
struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg);
- uint32_t speakers = 0;
- uint32_t channels = 0;
-
ASSERT(audio_info);
/* This should not happen.it does so we don't get BSOD*/
if (audio_info == NULL)
return;
- speakers = audio_info->flags.info.ALLSPEAKERS;
- channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
-
/* DisplayPort only allows for one audio stream with stream ID 0 */
REG_UPDATE(APG_CONTROL2, APG_DP_AUDIO_STREAM_ID, 0);
/* When running in "pair mode", pairs of audio channels have their own enable
* this is for really old audio drivers */
REG_UPDATE(APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, 0xFF);
- // REG_UPDATE(APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, channels);
/* Disable forced mem power off */
REG_UPDATE(APG_MEM_PWR, APG_MEM_PWR_FORCE, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index 275e78c06dee..745a5d187a98 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -37,6 +37,7 @@
#include "link_enc_cfg.h"
#include "dc_dmub_srv.h"
#include "dal_asic_id.h"
+#include "link.h"
#define CTX \
enc10->base.ctx
@@ -485,7 +486,7 @@ void dcn31_link_encoder_enable_dp_output(
if (link) {
dpia_control.dpia_id = link->ddc_hw_inst;
- dpia_control.fec_rdy = dc_link_should_enable_fec(link);
+ dpia_control.fec_rdy = link->dc->link_srv->dp_should_enable_fec(link);
} else {
DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__);
BREAK_TO_DEBUGGER();
@@ -532,7 +533,7 @@ void dcn31_link_encoder_enable_dp_mst_output(
if (link) {
dpia_control.dpia_id = link->ddc_hw_inst;
- dpia_control.fec_rdy = dc_link_should_enable_fec(link);
+ dpia_control.fec_rdy = link->dc->link_srv->dp_should_enable_fec(link);
} else {
DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__);
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
index 0b317ed31f91..5b7ad38f85e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
@@ -26,7 +26,6 @@
#include "dc_bios_types.h"
#include "dcn31_hpo_dp_link_encoder.h"
#include "reg_helper.h"
-#include "dc_link.h"
#include "stream_encoder.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
index d76f55a12eb4..0278bae50a9d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -26,7 +26,7 @@
#include "dc_bios_types.h"
#include "dcn31_hpo_dp_stream_encoder.h"
#include "reg_helper.h"
-#include "dc_link.h"
+#include "dc.h"
#define DC_LOGGER \
enc3->base.ctx->logger
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index d13e46eeee3c..10e3cc17f71a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -97,7 +97,7 @@ static void enable_memory_low_power(struct dc *dc)
// Power down VPGs
for (i = 0; i < dc->res_pool->stream_enc_count; i++)
dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
#endif
@@ -202,7 +202,7 @@ void dcn31_init_hw(struct dc *dc)
dmub_enable_outbox_notification(dc->ctx->dmub_srv);
/* we want to turn off all dp displays before doing detection */
- link_blank_all_dp_displays(dc);
+ dc->link_srv->blank_all_dp_displays(dc);
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
@@ -230,7 +230,7 @@ void dcn31_init_hw(struct dc *dc)
}
if (num_opps > 1) {
- link_blank_all_edp_displays(dc);
+ dc->link_srv->blank_all_edp_displays(dc);
break;
}
}
@@ -291,7 +291,7 @@ void dcn31_init_hw(struct dc *dc)
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
dc->res_pool->hubbub, false, false);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
if (dc->res_pool->hubbub->funcs->init_crb)
dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
#endif
@@ -414,7 +414,7 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
- else if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ else if (pipe_ctx->stream->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->update_dp_info_packets(
pipe_ctx->stream_res.hpo_dp_stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
@@ -565,7 +565,7 @@ static void dcn31_reset_back_end_for_pipe(
* VBIOS lit up eDP, so check link status too.
*/
if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
- link_set_dpms_off(pipe_ctx);
+ dc->link_srv->set_dpms_off(pipe_ctx);
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
@@ -584,7 +584,7 @@ static void dcn31_reset_back_end_for_pipe(
}
}
} else if (pipe_ctx->stream_res.dsc) {
- link_set_dsc_enable(pipe_ctx, false);
+ dc->link_srv->set_dsc_enable(pipe_ctx, false);
}
pipe_ctx->stream = NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index d3918a10773a..eaaa2e01f6d0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -827,8 +827,6 @@ static const struct resource_caps res_cap_dcn31 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
index 962a2c02b422..467509a65fa7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
@@ -296,12 +296,14 @@ static void enc314_stream_encoder_dp_unblank(
uint32_t n_vid = 0x8000;
uint32_t m_vid;
uint32_t n_multiply = 0;
+ uint32_t pix_per_cycle = 0;
uint64_t m_vid_l = n_vid;
/* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
if (is_two_pixels_per_containter(&param->timing) || param->opp_cnt > 1) {
/*this logic should be the same in get_pixel_clock_parameters() */
n_multiply = 1;
+ pix_per_cycle = 1;
}
/* M / N = Fstream / Flink
* m_vid / n_vid = pixel rate / link rate
@@ -329,6 +331,10 @@ static void enc314_stream_encoder_dp_unblank(
REG_UPDATE_2(DP_VID_TIMING,
DP_VID_M_N_GEN_EN, 1,
DP_VID_N_MUL, n_multiply);
+
+ REG_UPDATE(DP_PIXEL_FORMAT,
+ DP_PIXEL_PER_CYCLE_PROCESSING_MODE,
+ pix_per_cycle);
}
/* make sure stream is disabled before resetting steer fifo */
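The enc314 unblank hunk above keeps the relation M / N = Fstream / Flink with N fixed at 0x8000, flags the doubled stream rate through DP_VID_N_MUL when two pixels share one container, and now also programs DP_PIXEL_PER_CYCLE_PROCESSING_MODE from the same condition. A small arithmetic sketch, assuming made-up clock values (the real code derives them from param->timing and the link settings):

/* Hedged arithmetic sketch of the DP_VID_M/N programming above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t n_vid = 0x8000;            /* fixed N, as in the encoder code */
	uint64_t pixel_clk_khz = 594000;          /* assumed stream pixel clock */
	uint64_t link_symbol_clk_khz = 810000;    /* assumed link symbol clock */
	int two_pixels_per_container = 1;         /* e.g. YCbCr 4:2:0 or ODM combine */
	uint32_t n_multiply = 0;
	uint32_t pix_per_cycle = 0;

	if (two_pixels_per_container) {
		/* mirrors the hunk: tell the hardware about the doubled rate and
		 * switch the encoder to two-pixel-per-cycle processing */
		n_multiply = 1;
		pix_per_cycle = 1;
	}

	uint64_t m_vid = (uint64_t)n_vid * pixel_clk_khz / link_symbol_clk_khz;

	printf("DP_VID_M=%llu DP_VID_N=%u DP_VID_N_MUL=%u PIX_PER_CYCLE=%u\n",
	       (unsigned long long)m_vid, n_vid, n_multiply, pix_per_cycle);
	return 0;
}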
@@ -366,7 +372,7 @@ static void enc314_stream_encoder_dp_unblank(
*/
enc314_enable_fifo(enc);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
/* Set DSC-related configuration.
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
index 575d3501c848..bcc03426fc3e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
@@ -346,7 +346,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
- if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (stream->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_1;
} else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 54ed3de869d3..50ed7e09d5ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -855,8 +855,6 @@ static const struct resource_caps res_cap_dcn314 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
@@ -887,6 +885,7 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = false,
.enable_z9_disable_interface = true,
+ .minimum_z8_residency_time = 3080,
.psr_skip_crtc_disable = true,
.disable_dmcu = true,
.force_abm_enable = false,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
index 7887078c5f64..41c972c8eb19 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
@@ -824,8 +824,6 @@ static const struct resource_caps res_cap_dcn31 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
index dc0b49506275..9ead347a33e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
@@ -824,8 +824,6 @@ static const struct resource_caps res_cap_dcn31 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
index e4472c6be6c3..3fb4bcc34353 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
@@ -271,8 +271,7 @@ static void dccg32_set_dpstreamclk(
dccg32_set_dtbclk_p_src(dccg, src, otg_inst);
/* enabled to select one of the DTBCLKs for pipe */
- switch (otg_inst)
- {
+ switch (dp_hpo_inst) {
case 0:
REG_UPDATE_2(DPSTREAMCLK_CNTL,
DPSTREAMCLK0_EN,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h
index 1c46fad0977b..271c163e4844 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h
@@ -31,42 +31,6 @@
#define DCCG_SFII(block, reg_name, field_prefix, field_name, inst, post_fix)\
.field_prefix ## _ ## field_name[inst] = block ## inst ## _ ## reg_name ## __ ## field_prefix ## inst ## _ ## field_name ## post_fix
-
-#define DCCG_REG_LIST_DCN32() \
- SR(DPPCLK_DTO_CTRL),\
- DCCG_SRII(DTO_PARAM, DPPCLK, 0),\
- DCCG_SRII(DTO_PARAM, DPPCLK, 1),\
- DCCG_SRII(DTO_PARAM, DPPCLK, 2),\
- DCCG_SRII(DTO_PARAM, DPPCLK, 3),\
- DCCG_SRII(CLOCK_CNTL, HDMICHARCLK, 0),\
- SR(PHYASYMCLK_CLOCK_CNTL),\
- SR(PHYBSYMCLK_CLOCK_CNTL),\
- SR(PHYCSYMCLK_CLOCK_CNTL),\
- SR(PHYDSYMCLK_CLOCK_CNTL),\
- SR(PHYESYMCLK_CLOCK_CNTL),\
- SR(DPSTREAMCLK_CNTL),\
- SR(HDMISTREAMCLK_CNTL),\
- SR(SYMCLK32_SE_CNTL),\
- SR(SYMCLK32_LE_CNTL),\
- DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\
- DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1),\
- DCCG_SRII(PIXEL_RATE_CNTL, OTG, 2),\
- DCCG_SRII(PIXEL_RATE_CNTL, OTG, 3),\
- DCCG_SRII(MODULO, DTBCLK_DTO, 0),\
- DCCG_SRII(MODULO, DTBCLK_DTO, 1),\
- DCCG_SRII(MODULO, DTBCLK_DTO, 2),\
- DCCG_SRII(MODULO, DTBCLK_DTO, 3),\
- DCCG_SRII(PHASE, DTBCLK_DTO, 0),\
- DCCG_SRII(PHASE, DTBCLK_DTO, 1),\
- DCCG_SRII(PHASE, DTBCLK_DTO, 2),\
- DCCG_SRII(PHASE, DTBCLK_DTO, 3),\
- SR(DCCG_AUDIO_DTBCLK_DTO_MODULO),\
- SR(DCCG_AUDIO_DTBCLK_DTO_PHASE),\
- SR(OTG_PIXEL_RATE_DIV),\
- SR(DTBCLK_P_CNTL),\
- SR(DCCG_AUDIO_DTO_SOURCE)
-
-
#define DCCG_MASK_SH_LIST_DCN32(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
index 36e6f5657942..c72448125976 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
@@ -211,10 +211,8 @@ static void enc32_stream_encoder_hdmi_set_stream_attribute(
HDMI_GC_SEND, 1,
HDMI_NULL_SEND, 1);
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
/* Disable Audio Content Protection packet transmission */
REG_UPDATE(HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, 0);
-#endif
/* following belongs to audio */
/* Enable Audio InfoFrame packet transmission. */
@@ -373,7 +371,7 @@ static void enc32_stream_encoder_dp_unblank(
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
/* Set DSC-related configuration.
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
index ecd041a446d2..875b1cd46056 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
@@ -31,70 +31,6 @@
#include "stream_encoder.h"
#include "dcn20/dcn20_stream_encoder.h"
-#define SE_DCN32_REG_LIST(id)\
- SRI(AFMT_CNTL, DIG, id), \
- SRI(DIG_FE_CNTL, DIG, id), \
- SRI(HDMI_CONTROL, DIG, id), \
- SRI(HDMI_DB_CONTROL, DIG, id), \
- SRI(HDMI_GC, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL4, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL5, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL6, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL7, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL8, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL9, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL10, DIG, id), \
- SRI(HDMI_INFOFRAME_CONTROL0, DIG, id), \
- SRI(HDMI_INFOFRAME_CONTROL1, DIG, id), \
- SRI(HDMI_VBI_PACKET_CONTROL, DIG, id), \
- SRI(HDMI_AUDIO_PACKET_CONTROL, DIG, id),\
- SRI(HDMI_ACR_PACKET_CONTROL, DIG, id),\
- SRI(HDMI_ACR_32_0, DIG, id),\
- SRI(HDMI_ACR_32_1, DIG, id),\
- SRI(HDMI_ACR_44_0, DIG, id),\
- SRI(HDMI_ACR_44_1, DIG, id),\
- SRI(HDMI_ACR_48_0, DIG, id),\
- SRI(HDMI_ACR_48_1, DIG, id),\
- SRI(DP_DB_CNTL, DP, id), \
- SRI(DP_MSA_MISC, DP, id), \
- SRI(DP_MSA_VBID_MISC, DP, id), \
- SRI(DP_MSA_COLORIMETRY, DP, id), \
- SRI(DP_MSA_TIMING_PARAM1, DP, id), \
- SRI(DP_MSA_TIMING_PARAM2, DP, id), \
- SRI(DP_MSA_TIMING_PARAM3, DP, id), \
- SRI(DP_MSA_TIMING_PARAM4, DP, id), \
- SRI(DP_MSE_RATE_CNTL, DP, id), \
- SRI(DP_MSE_RATE_UPDATE, DP, id), \
- SRI(DP_PIXEL_FORMAT, DP, id), \
- SRI(DP_SEC_CNTL, DP, id), \
- SRI(DP_SEC_CNTL1, DP, id), \
- SRI(DP_SEC_CNTL2, DP, id), \
- SRI(DP_SEC_CNTL5, DP, id), \
- SRI(DP_SEC_CNTL6, DP, id), \
- SRI(DP_STEER_FIFO, DP, id), \
- SRI(DP_VID_M, DP, id), \
- SRI(DP_VID_N, DP, id), \
- SRI(DP_VID_STREAM_CNTL, DP, id), \
- SRI(DP_VID_TIMING, DP, id), \
- SRI(DP_SEC_AUD_N, DP, id), \
- SRI(DP_SEC_TIMESTAMP, DP, id), \
- SRI(DP_DSC_CNTL, DP, id), \
- SRI(DP_SEC_METADATA_TRANSMISSION, DP, id), \
- SRI(HDMI_METADATA_PACKET_CONTROL, DIG, id), \
- SRI(DP_SEC_FRAMING4, DP, id), \
- SRI(DP_GSP11_CNTL, DP, id), \
- SRI(DME_CONTROL, DME, id),\
- SRI(DP_SEC_METADATA_TRANSMISSION, DP, id), \
- SRI(HDMI_METADATA_PACKET_CONTROL, DIG, id), \
- SRI(DIG_FE_CNTL, DIG, id), \
- SRI(DIG_CLOCK_PATTERN, DIG, id), \
- SRI(DIG_FIFO_CTRL0, DIG, id)
-
-
#define SE_COMMON_MASK_SH_LIST_DCN32(mask_sh)\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.c
index 4dbad8d4b4fc..8af01f579690 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.c
@@ -26,7 +26,6 @@
#include "dcn31/dcn31_hpo_dp_link_encoder.h"
#include "dcn32_hpo_dp_link_encoder.h"
#include "reg_helper.h"
-#include "dc_link.h"
#include "stream_encoder.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
index b20eb04724bb..ad33427192c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
@@ -28,68 +28,6 @@
#include "dcn21/dcn21_hubbub.h"
-#define HUBBUB_REG_LIST_DCN32(id)\
- SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\
- SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B),\
- SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C),\
- SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D),\
- SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL),\
- SR(DCHUBBUB_ARB_DRAM_STATE_CNTL),\
- SR(DCHUBBUB_ARB_SAT_LEVEL),\
- SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND),\
- SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
- SR(DCHUBBUB_SOFT_RESET),\
- SR(DCHUBBUB_CRC_CTRL), \
- SR(DCN_VM_FB_LOCATION_BASE),\
- SR(DCN_VM_FB_LOCATION_TOP),\
- SR(DCN_VM_FB_OFFSET),\
- SR(DCN_VM_AGP_BOT),\
- SR(DCN_VM_AGP_TOP),\
- SR(DCN_VM_AGP_BASE),\
- HUBBUB_SR_WATERMARK_REG_LIST(), \
- SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A),\
- SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B),\
- SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C),\
- SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D),\
- SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A),\
- SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B),\
- SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C),\
- SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D),\
- SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A),\
- SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B),\
- SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C),\
- SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D),\
- SR(DCHUBBUB_DET0_CTRL),\
- SR(DCHUBBUB_DET1_CTRL),\
- SR(DCHUBBUB_DET2_CTRL),\
- SR(DCHUBBUB_DET3_CTRL),\
- SR(DCHUBBUB_COMPBUF_CTRL),\
- SR(COMPBUF_RESERVED_SPACE),\
- SR(DCHUBBUB_DEBUG_CTRL_0),\
- SR(DCHUBBUB_ARB_USR_RETRAINING_CNTL),\
- SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A),\
- SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B),\
- SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C),\
- SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D),\
- SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A),\
- SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B),\
- SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C),\
- SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D),\
- SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A),\
- SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B),\
- SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C),\
- SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D),\
- SR(DCN_VM_FAULT_ADDR_MSB),\
- SR(DCN_VM_FAULT_ADDR_LSB),\
- SR(DCN_VM_FAULT_CNTL),\
- SR(DCN_VM_FAULT_STATUS),\
- SR(SDPIF_REQUEST_RATE_LIMIT),\
- SR(DCHUBBUB_CLOCK_CNTL),\
- SR(DCHUBBUB_SDPIF_CFG0),\
- SR(DCHUBBUB_SDPIF_CFG1),\
- SR(DCHUBBUB_MEM_PWR_MODE_CTRL)
-
-
#define HUBBUB_MASK_SH_LIST_DCN32(mask_sh)\
HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
HUBBUB_SF(DCHUBBUB_SOFT_RESET, DCHUBBUB_GLOBAL_SOFT_RESET, mask_sh), \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h
index 4cdbf63c952b..d5e5ed8ab869 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h
@@ -31,12 +31,6 @@
#include "dcn30/dcn30_hubp.h"
#include "dcn31/dcn31_hubp.h"
-#define HUBP_REG_LIST_DCN32(id)\
- HUBP_REG_LIST_DCN30(id),\
- SRI(DCHUBP_MALL_CONFIG, HUBP, id),\
- SRI(DCHUBP_VMPG_CONFIG, HUBP, id),\
- SRI(UCLK_PSTATE_FORCE, HUBPREQ, id)
-
#define HUBP_MASK_SH_LIST_DCN32(mask_sh)\
HUBP_MASK_SH_LIST_DCN31(mask_sh),\
HUBP_SF(HUBP0_DCHUBP_MALL_CONFIG, USE_MALL_SEL, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index 16f892125b6f..f9073b722b36 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -131,10 +131,15 @@ void dcn32_enable_power_gating_plane(
bool enable)
{
bool force_on = true; /* disable power gating */
+ uint32_t org_ip_request_cntl = 0;
if (enable)
force_on = false;
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
+
/* DCHUBP0/1/2/3 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
@@ -146,6 +151,9 @@ void dcn32_enable_power_gating_plane(
REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
+
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}
void dcn32_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
@@ -786,13 +794,14 @@ void dcn32_init_hw(struct dc *dc)
}
}
- /* Power gate DSCs */
- for (i = 0; i < res_pool->res_cap->num_dsc; i++)
- if (hws->funcs.dsc_pg_control != NULL)
- hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
+ /* enable_power_gating_plane before dsc_pg_control because
+ * FORCEON = 1 with hw default value on bootup, resume from s3
+ */
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(dc->hwseq, true);
/* we want to turn off all dp displays before doing detection */
- link_blank_all_dp_displays(dc);
+ dc->link_srv->blank_all_dp_displays(dc);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
@@ -828,7 +837,7 @@ void dcn32_init_hw(struct dc *dc)
struct dc_link *edp_links[MAX_NUM_EDP];
struct dc_link *edp_link;
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num) {
for (i = 0; i < edp_num; i++) {
edp_link = edp_links[i];
@@ -886,8 +895,6 @@ void dcn32_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
- if (hws->funcs.enable_power_gating_plane)
- hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
@@ -1095,7 +1102,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
- if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (stream->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_1;
} else if (dc_is_hdmi_tmds_signal(stream->signal) || dc_is_dvi_signal(stream->signal)) {
@@ -1104,7 +1111,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
*k2_div = PIXEL_RATE_DIV_BY_2;
else
*k2_div = PIXEL_RATE_DIV_BY_4;
- } else if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
+ } else if (dc_is_dp_signal(stream->signal)) {
if (two_pix_per_container) {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_2;
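The calculate_dccg_k1_k2_values hunks (here and in dcn314 above) query 128b/132b encoding through the link service and drop the dc_is_virtual_signal() case from the DP branch. Below is a sketch of the divider selection covering only the branches visible in these hunks; the stream flags are stand-ins for dc_is_*_signal() and dp_is_128b_132b_signal(), and the remaining HDMI/DVI and DP cases are omitted.

/* Hedged, partial sketch of the k1/k2 pixel-rate divider selection. */
#include <stdbool.h>
#include <stdio.h>

enum pixel_rate_div { DIV_BY_1 = 1, DIV_BY_2 = 2, DIV_BY_4 = 4 };

struct stream_flags {
	bool is_128b_132b;              /* stands in for dp_is_128b_132b_signal() */
	bool is_dp;                     /* stands in for dc_is_dp_signal() */
	bool two_pix_per_container;
};

static void calc_k1_k2(const struct stream_flags *s,
		       enum pixel_rate_div *k1, enum pixel_rate_div *k2)
{
	*k1 = DIV_BY_1;
	*k2 = DIV_BY_1;

	if (s->is_128b_132b)
		return;                 /* DP 128b/132b: no extra division, as above */

	if (s->is_dp && s->two_pix_per_container) {
		*k1 = DIV_BY_1;
		*k2 = DIV_BY_2;
	}
	/* HDMI/DVI and the remaining DP cases are not modeled here. */
}

int main(void)
{
	struct stream_flags s = { .is_dp = true, .two_pix_per_container = true };
	enum pixel_rate_div k1, k2;

	calc_k1_k2(&s, &k1, &k2);
	printf("k1=%d k2=%d\n", k1, k2);
	return 0;
}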
@@ -1159,7 +1166,7 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
params.link_settings.link_rate = link_settings->link_rate;
- if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
pipe_ctx->stream_res.hpo_dp_stream_enc,
@@ -1186,7 +1193,7 @@ bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
return false;
- if (dc_is_dp_signal(pipe_ctx->stream->signal) && !link_is_dp_128b_132b_signal(pipe_ctx) &&
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) && !dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) &&
dc->debug.enable_dp_dig_pixel_rate_div_policy)
return true;
return false;
@@ -1220,7 +1227,8 @@ static void apply_symclk_on_tx_off_wa(struct dc_link *link)
pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
- link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+ dc->link_srv->dp_get_encoding_format(
+ &pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings);
link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
break;
@@ -1252,7 +1260,7 @@ void dcn32_disable_link_output(struct dc_link *link,
else if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->unlock_phy(dmcu);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+ dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
apply_symclk_on_tx_off_wa(link);
}
@@ -1406,3 +1414,86 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
}
}
}
+
+/* Blank pixel data during initialization */
+void dcn32_init_blank(
+ struct dc *dc,
+ struct timing_generator *tg)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ enum dc_color_space color_space;
+ struct tg_color black_color = {0};
+ struct output_pixel_processor *opp = NULL;
+ struct output_pixel_processor *bottom_opp = NULL;
+ uint32_t num_opps, opp_id_src0, opp_id_src1;
+ uint32_t otg_active_width, otg_active_height;
+ uint32_t i;
+
+ /* program opp dpg blank color */
+ color_space = COLOR_SPACE_SRGB;
+ color_space_to_black_color(dc, color_space, &black_color);
+
+ /* get the OTG active size */
+ tg->funcs->get_otg_active_size(tg,
+ &otg_active_width,
+ &otg_active_height);
+
+ /* get the OPTC source */
+ tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
+
+ if (opp_id_src0 >= dc->res_pool->res_cap->num_opp) {
+ ASSERT(false);
+ return;
+ }
+
+ for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
+ if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
+ opp = dc->res_pool->opps[i];
+ break;
+ }
+ }
+
+ if (num_opps == 2) {
+ otg_active_width = otg_active_width / 2;
+
+ if (opp_id_src1 >= dc->res_pool->res_cap->num_opp) {
+ ASSERT(false);
+ return;
+ }
+ for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
+ if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src1) {
+ bottom_opp = dc->res_pool->opps[i];
+ break;
+ }
+ }
+ }
+
+ if (opp && opp->funcs->opp_set_disp_pattern_generator)
+ opp->funcs->opp_set_disp_pattern_generator(
+ opp,
+ CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+ COLOR_DEPTH_UNDEFINED,
+ &black_color,
+ otg_active_width,
+ otg_active_height,
+ 0);
+
+ if (num_opps == 2) {
+ if (bottom_opp && bottom_opp->funcs->opp_set_disp_pattern_generator) {
+ bottom_opp->funcs->opp_set_disp_pattern_generator(
+ bottom_opp,
+ CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+ COLOR_DEPTH_UNDEFINED,
+ &black_color,
+ otg_active_width,
+ otg_active_height,
+ 0);
+ hws->funcs.wait_for_blank_complete(bottom_opp);
+ }
+ }
+
+ if (opp)
+ hws->funcs.wait_for_blank_complete(opp);
+}
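dcn32_init_blank() above resolves the OPP(s) the OTG is sourcing, halves the active width when two OPPs are combined, and programs a solid black pattern on each before waiting for blank to complete. A minimal sketch of that per-OPP flow with stand-in types (the real path goes through opp_set_disp_pattern_generator and wait_for_blank_complete):

/* Hedged sketch of the ODM-aware blanking flow in dcn32_init_blank(). */
#include <stdio.h>

struct opp { int inst; };

static void set_black_pattern(struct opp *opp, unsigned int width, unsigned int height)
{
	printf("OPP%d: solid black %ux%u\n", opp->inst, width, height);
}

int main(void)
{
	struct opp opps[2] = { { .inst = 0 }, { .inst = 1 } };
	unsigned int otg_active_width = 7680, otg_active_height = 2160;
	unsigned int num_opps = 2;           /* two OPPs => ODM 2:1 combine */
	unsigned int i;

	if (num_opps == 2)
		otg_active_width /= 2;       /* each OPP drives half the OTG width */

	for (i = 0; i < num_opps; i++)
		set_black_pattern(&opps[i], otg_active_width, otg_active_height);

	/* the real code then waits for blank completion on each OPP */
	return 0;
}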
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
index e9e9534f3668..84c1f36c3fa6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
@@ -104,4 +104,8 @@ void dcn32_update_dsc_pg(struct dc *dc,
void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context);
+void dcn32_init_blank(
+ struct dc *dc,
+ struct timing_generator *tg);
+
#endif /* __DC_HWSS_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
index 0694fa3a3680..dcb81662884f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
@@ -132,7 +132,7 @@ static const struct hwseq_private_funcs dcn32_private_funcs = {
.enable_stream_gating = dcn20_enable_stream_gating,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
.did_underflow_occur = dcn10_did_underflow_occur,
- .init_blank = dcn20_init_blank,
+ .init_blank = dcn32_init_blank,
.disable_vga = dcn20_disable_vga,
.bios_golden_init = dcn10_bios_golden_init,
.plane_atomic_disable = dcn20_plane_atomic_disable,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
index 206a5ddbaf6d..c8041cfd594d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
@@ -42,7 +42,7 @@
mpc30->mpc_shift->field_name, mpc30->mpc_mask->field_name
-static void mpc32_mpc_init(struct mpc *mpc)
+void mpc32_mpc_init(struct mpc *mpc)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
int mpcc_id;
@@ -254,7 +254,7 @@ static void mpc32_program_post1dlut_pwl(
}
}
-static bool mpc32_program_post1dlut(
+bool mpc32_program_post1dlut(
struct mpc *mpc,
const struct pwl_params *params,
uint32_t mpcc_id)
@@ -701,7 +701,7 @@ static void mpc32_power_on_shaper_3dlut(
}
-static bool mpc32_program_shaper(
+bool mpc32_program_shaper(
struct mpc *mpc,
const struct pwl_params *params,
uint32_t mpcc_id)
@@ -897,7 +897,7 @@ static void mpc32_set_3dlut_mode(
}
-static bool mpc32_program_3dlut(
+bool mpc32_program_3dlut(
struct mpc *mpc,
const struct tetrahedral_params *params,
int mpcc_id)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h
index 61f33c0d8e59..2c2ecd053806 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h
@@ -310,6 +310,19 @@ struct dcn32_mpc_registers {
MPC_REG_VARIABLE_LIST_DCN3_0;
MPC_REG_VARIABLE_LIST_DCN32;
};
+void mpc32_mpc_init(struct mpc *mpc);
+bool mpc32_program_3dlut(
+ struct mpc *mpc,
+ const struct tetrahedral_params *params,
+ int mpcc_id);
+bool mpc32_program_post1dlut(
+ struct mpc *mpc,
+ const struct pwl_params *params,
+ uint32_t mpcc_id);
+bool mpc32_program_shaper(
+ struct mpc *mpc,
+ const struct pwl_params *params,
+ uint32_t mpcc_id);
void dcn32_mpc_construct(struct dcn30_mpc *mpc30,
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h
index 5e57c39235fa..b92ba8c75694 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h
@@ -28,77 +28,6 @@
#include "dcn10/dcn10_optc.h"
-#define OPTC_COMMON_REG_LIST_DCN3_2(inst) \
- SRI(OTG_VSTARTUP_PARAM, OTG, inst),\
- SRI(OTG_VUPDATE_PARAM, OTG, inst),\
- SRI(OTG_VREADY_PARAM, OTG, inst),\
- SRI(OTG_MASTER_UPDATE_LOCK, OTG, inst),\
- SRI(OTG_GLOBAL_CONTROL0, OTG, inst),\
- SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\
- SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\
- SRI(OTG_GLOBAL_CONTROL4, OTG, inst),\
- SRI(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst),\
- SRI(OTG_H_TOTAL, OTG, inst),\
- SRI(OTG_H_BLANK_START_END, OTG, inst),\
- SRI(OTG_H_SYNC_A, OTG, inst),\
- SRI(OTG_H_SYNC_A_CNTL, OTG, inst),\
- SRI(OTG_H_TIMING_CNTL, OTG, inst),\
- SRI(OTG_V_TOTAL, OTG, inst),\
- SRI(OTG_V_BLANK_START_END, OTG, inst),\
- SRI(OTG_V_SYNC_A, OTG, inst),\
- SRI(OTG_V_SYNC_A_CNTL, OTG, inst),\
- SRI(OTG_CONTROL, OTG, inst),\
- SRI(OTG_STEREO_CONTROL, OTG, inst),\
- SRI(OTG_3D_STRUCTURE_CONTROL, OTG, inst),\
- SRI(OTG_STEREO_STATUS, OTG, inst),\
- SRI(OTG_V_TOTAL_MAX, OTG, inst),\
- SRI(OTG_V_TOTAL_MIN, OTG, inst),\
- SRI(OTG_V_TOTAL_CONTROL, OTG, inst),\
- SRI(OTG_TRIGA_CNTL, OTG, inst),\
- SRI(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\
- SRI(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\
- SRI(OTG_STATUS_FRAME_COUNT, OTG, inst),\
- SRI(OTG_STATUS, OTG, inst),\
- SRI(OTG_STATUS_POSITION, OTG, inst),\
- SRI(OTG_NOM_VERT_POSITION, OTG, inst),\
- SRI(OTG_M_CONST_DTO0, OTG, inst),\
- SRI(OTG_M_CONST_DTO1, OTG, inst),\
- SRI(OTG_CLOCK_CONTROL, OTG, inst),\
- SRI(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\
- SRI(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\
- SRI(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst),\
- SRI(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst),\
- SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\
- SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\
- SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\
- SRI(OPTC_DATA_SOURCE_SELECT, ODM, inst),\
- SRI(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\
- SRI(CONTROL, VTG, inst),\
- SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\
- SRI(OTG_GSL_CONTROL, OTG, inst),\
- SRI(OTG_CRC_CNTL, OTG, inst),\
- SRI(OTG_CRC0_DATA_RG, OTG, inst),\
- SRI(OTG_CRC0_DATA_B, OTG, inst),\
- SRI(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\
- SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\
- SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\
- SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst),\
- SR(GSL_SOURCE_SELECT),\
- SRI(OTG_TRIGA_MANUAL_TRIG, OTG, inst),\
- SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\
- SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\
- SRI(OTG_GSL_WINDOW_X, OTG, inst),\
- SRI(OTG_GSL_WINDOW_Y, OTG, inst),\
- SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\
- SRI(OTG_DSC_START_POSITION, OTG, inst),\
- SRI(OTG_DRR_TRIGGER_WINDOW, OTG, inst),\
- SRI(OTG_DRR_V_TOTAL_CHANGE, OTG, inst),\
- SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\
- SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\
- SRI(OPTC_WIDTH_CONTROL, ODM, inst),\
- SRI(OPTC_MEMORY_CONFIG, ODM, inst),\
- SRI(OTG_DRR_CONTROL, OTG, inst)
-
#define OPTC_COMMON_MASK_SH_LIST_DCN3_2(mask_sh)\
SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\
SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_OFFSET, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 74e50c09bb62..1715909b1225 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -657,8 +657,6 @@ static const struct resource_caps res_cap_dcn32 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
@@ -726,6 +724,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.alloc_extra_way_for_cursor = true,
.min_prefetch_in_strobe_ns = 60000, // 60us
.disable_unbounded_requesting = false,
+ .override_dispclk_programming = true,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -1506,8 +1505,11 @@ static void dcn32_resource_destruct(struct dcn32_resource_pool *pool)
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
- if (pool->base.oem_device != NULL)
- link_destroy_ddc_service(&pool->base.oem_device);
+ if (pool->base.oem_device != NULL) {
+ struct dc *dc = pool->base.oem_device->ctx->dc;
+
+ dc->link_srv->destroy_ddc_service(&pool->base.oem_device);
+ }
}
@@ -1611,7 +1613,6 @@ bool dcn32_acquire_post_bldn_3dlut(
struct dc_transfer_func **shaper)
{
bool ret = false;
- union dc_3dlut_state *state;
ASSERT(*lut == NULL && *shaper == NULL);
*lut = NULL;
@@ -1620,7 +1621,6 @@ bool dcn32_acquire_post_bldn_3dlut(
if (!res_ctx->is_mpc_3dlut_acquired[mpcc_id]) {
*lut = pool->mpc_lut[mpcc_id];
*shaper = pool->mpc_shaper[mpcc_id];
- state = &pool->mpc_lut[mpcc_id]->state;
res_ctx->is_mpc_3dlut_acquired[mpcc_id] = true;
ret = true;
}
@@ -1913,8 +1913,8 @@ int dcn32_populate_dml_pipes_from_context(
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
bool subvp_in_use = false;
- uint8_t is_pipe_split_expected[MAX_PIPES] = {0};
struct dc_crtc_timing *timing;
+ bool vsr_odm_support = false;
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
@@ -1932,12 +1932,15 @@ int dcn32_populate_dml_pipes_from_context(
timing = &pipe->stream->timing;
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ vsr_odm_support = (res_ctx->pipe_ctx[i].stream->src.width >= 5120 &&
+ res_ctx->pipe_ctx[i].stream->src.width > res_ctx->pipe_ctx[i].stream->dst.width);
if (context->stream_count == 1 &&
context->stream_status[0].plane_count == 1 &&
!dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) &&
is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) &&
pipe->stream->timing.pix_clk_100hz * 100 > DCN3_2_VMIN_DISPCLK_HZ &&
- dc->debug.enable_single_display_2to1_odm_policy) {
+ dc->debug.enable_single_display_2to1_odm_policy &&
+ !vsr_odm_support) { //excluding 2to1 ODM combine on >= 5k vsr
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
}
pipe_cnt++;
@@ -2002,7 +2005,7 @@ int dcn32_populate_dml_pipes_from_context(
}
DC_FP_START();
- is_pipe_split_expected[i] = dcn32_predict_pipe_split(context, &pipes[pipe_cnt]);
+ dcn32_predict_pipe_split(context, &pipes[pipe_cnt]);
DC_FP_END();
pipe_cnt++;
@@ -2182,6 +2185,7 @@ static bool dcn32_resource_construct(
dc->caps.edp_dsc_support = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
+ dc->caps.seamless_odm = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
@@ -2455,7 +2459,7 @@ static bool dcn32_resource_construct(
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
- pool->base.oem_device = link_create_ddc_service(&ddc_init_data);
+ pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
} else {
pool->base.oem_device = NULL;
}
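The dcn32_populate_dml_pipes_from_context() hunk above adds a VSR exclusion to the single-display 2:1 ODM combine policy: sources at least 5120 pixels wide that are being downscaled keep the default policy. A minimal standalone sketch of that predicate follows; the struct, the threshold value and the helper name are simplified stand-ins for the DC types used in the driver, not the driver's own code.

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-ins for the DC stream/timing state used by the driver. */
struct stream_info {
	uint32_t src_width;              /* source surface width in pixels */
	uint32_t dst_width;              /* destination (scaled) width     */
	uint64_t pix_clk_hz;             /* pixel clock in Hz              */
	bool     is_hdmi;
	bool     h_timing_divisible_by_2;
};

#define VMIN_DISPCLK_HZ   600000000ULL   /* placeholder for DCN3_2_VMIN_DISPCLK_HZ */

/*
 * Decide whether the 2:1 ODM combine policy may be applied to a single
 * display. Mirrors the conditions in the hunk above: one stream, one plane,
 * not HDMI, divisible horizontal timing, pixel clock above the VMIN DISPCLK
 * threshold, the debug policy enabled, and not a >= 5K VSR (downscaling) case.
 */
static bool allow_2to1_odm(const struct stream_info *s,
			   int stream_count, int plane_count,
			   bool single_display_2to1_policy)
{
	bool vsr = s->src_width >= 5120 && s->src_width > s->dst_width;

	return stream_count == 1 &&
	       plane_count == 1 &&
	       !s->is_hdmi &&
	       s->h_timing_divisible_by_2 &&
	       s->pix_clk_hz > VMIN_DISPCLK_HZ &&
	       single_display_2to1_policy &&
	       !vsr; /* exclude 2:1 ODM combine for >= 5K VSR */
}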
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 3a2d7bcc4b6d..47fa51c1d3f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -59,25 +59,21 @@ uint32_t dcn32_helper_calculate_mall_bytes_for_cursor(
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
uint32_t cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
- uint32_t cursor_bpp = 4;
uint32_t cursor_mall_size_bytes = 0;
switch (pipe_ctx->stream->cursor_attributes.color_format) {
case CURSOR_MODE_MONO:
cursor_size /= 2;
- cursor_bpp = 4;
break;
case CURSOR_MODE_COLOR_1BIT_AND:
case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
cursor_size *= 4;
- cursor_bpp = 4;
break;
case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
cursor_size *= 8;
- cursor_bpp = 8;
break;
}
@@ -261,6 +257,8 @@ bool dcn32_is_psr_capable(struct pipe_ctx *pipe)
return psr_capable;
}
+#define DCN3_2_NEW_DET_OVERRIDE_MIN_MULTIPLIER 7
+
/**
* *******************************************************************************************
* dcn32_determine_det_override: Determine DET allocation for each pipe
@@ -272,7 +270,6 @@ bool dcn32_is_psr_capable(struct pipe_ctx *pipe)
* If there is a plane that's driven by more than 1 pipe (i.e. pipe split), then the
* number of DET for that given plane will be split among the pipes driving that plane.
*
- *
* High level algorithm:
* 1. Split total DET among number of streams
* 2. For each stream, split DET among the planes
@@ -280,6 +277,18 @@ bool dcn32_is_psr_capable(struct pipe_ctx *pipe)
* among those pipes.
* 4. Assign the DET override to the DML pipes.
*
+ * Special cases:
+ *
+ * For two displays with a large difference in pixel rate, we may experience
+ * underflow on the larger display when the DET is divided equally. To handle
+ * this, a modified algorithm assigns more DET to the larger display.
+ *
+ * 1. Calculate the difference in pixel rates (multiplier) between the two displays.
+ * 2. If the multiplier exceeds DCN3_2_NEW_DET_OVERRIDE_MIN_MULTIPLIER, then
+ *    use the modified DET override algorithm.
+ * 3. Assign a smaller DET size to the lower pixel rate display and a larger
+ *    DET size to the higher pixel rate display.
+ *
* @param [in]: dc: Current DC state
* @param [in]: context: New DC state to be programmed
* @param [in]: pipes: Array of DML pipes
@@ -299,18 +308,46 @@ void dcn32_determine_det_override(struct dc *dc,
struct dc_plane_state *current_plane = NULL;
uint8_t stream_count = 0;
+ int phy_pix_clk_mult, lower_mode_stream_index;
+ int phy_pix_clk[MAX_PIPES] = {0};
+ bool use_new_det_override_algorithm = false;
+
for (i = 0; i < context->stream_count; i++) {
/* Don't count SubVP streams for DET allocation */
if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) {
+ phy_pix_clk[i] = context->streams[i]->phy_pix_clk;
stream_count++;
}
}
+ /* Check for special case with two displays, one with much higher pixel rate */
+ if (stream_count == 2) {
+ ASSERT((phy_pix_clk[0] > 0) && (phy_pix_clk[1] > 0));
+ if (phy_pix_clk[0] < phy_pix_clk[1]) {
+ lower_mode_stream_index = 0;
+ phy_pix_clk_mult = phy_pix_clk[1] / phy_pix_clk[0];
+ } else {
+ lower_mode_stream_index = 1;
+ phy_pix_clk_mult = phy_pix_clk[0] / phy_pix_clk[1];
+ }
+
+ if (phy_pix_clk_mult >= DCN3_2_NEW_DET_OVERRIDE_MIN_MULTIPLIER)
+ use_new_det_override_algorithm = true;
+ }
+
if (stream_count > 0) {
stream_segments = 18 / stream_count;
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM)
continue;
+
+ if (use_new_det_override_algorithm) {
+ if (i == lower_mode_stream_index)
+ stream_segments = 4;
+ else
+ stream_segments = 14;
+ }
+
if (context->stream_status[i].plane_count > 0)
plane_segments = stream_segments / context->stream_status[i].plane_count;
else
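The dcn32_determine_det_override() changes above implement the special-case split described in the doc comment: with two streams whose pixel rates differ by at least DCN3_2_NEW_DET_OVERRIDE_MIN_MULTIPLIER, the lower-rate stream gets 4 of the 18 DET segments and the higher-rate stream gets 14, instead of an even split. Below is a hedged, self-contained sketch of that allocation step; the segment counts and the multiplier threshold come from the hunk, while the helper name and the simplified inputs are illustrative only.

#include <stdio.h>

#define TOTAL_DET_SEGMENTS		18
#define NEW_DET_OVERRIDE_MIN_MULTIPLIER	7   /* DCN3_2_NEW_DET_OVERRIDE_MIN_MULTIPLIER */

/*
 * Return the number of DET segments assigned to stream 'i' given the
 * per-stream pixel clocks (kHz). For exactly two streams whose pixel
 * rates differ by the minimum multiplier or more, use the 4/14 split;
 * otherwise split the segments evenly.
 */
static int det_segments_for_stream(int i, int stream_count, const int *phy_pix_clk)
{
	if (stream_count == 2) {
		int lower = phy_pix_clk[0] < phy_pix_clk[1] ? 0 : 1;
		int mult = phy_pix_clk[1 - lower] / phy_pix_clk[lower];

		if (mult >= NEW_DET_OVERRIDE_MIN_MULTIPLIER)
			return i == lower ? 4 : 14;
	}

	return stream_count > 0 ? TOTAL_DET_SEGMENTS / stream_count : 0;
}

int main(void)
{
	/* e.g. a fast 4K display paired with a much slower secondary panel */
	int clk[2] = { 1188000, 148500 };	/* kHz; ratio == 8 */

	printf("stream 0: %d segments\n", det_segments_for_stream(0, 2, clk)); /* 14 */
	printf("stream 1: %d segments\n", det_segments_for_stream(1, 2, clk)); /*  4 */
	return 0;
}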
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index 55f918b44077..c6a0e84885a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -655,8 +655,6 @@ static const struct resource_caps res_cap_dcn321 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
@@ -724,6 +722,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.alloc_extra_way_for_cursor = true,
.min_prefetch_in_strobe_ns = 60000, // 60us
.disable_unbounded_requesting = false,
+ .override_dispclk_programming = true,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -1491,8 +1490,11 @@ static void dcn321_resource_destruct(struct dcn321_resource_pool *pool)
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
- if (pool->base.oem_device != NULL)
- link_destroy_ddc_service(&pool->base.oem_device);
+ if (pool->base.oem_device != NULL) {
+ struct dc *dc = pool->base.oem_device->ctx->dc;
+
+ dc->link_srv->destroy_ddc_service(&pool->base.oem_device);
+ }
}
@@ -1996,7 +1998,7 @@ static bool dcn321_resource_construct(
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
- pool->base.oem_device = link_create_ddc_service(&ddc_init_data);
+ pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
} else {
pool->base.oem_device = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 9d0f79dff2e3..01db035589c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -129,7 +129,7 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_rcflags)
DML = calcs/dce_calcs.o calcs/custom_float.o calcs/bw_fixed.o
-ifdef CONFIG_DRM_AMD_DC_DCN
+ifdef CONFIG_DRM_AMD_DC_FP
DML += display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o
DML += dcn10/dcn10_fpu.o
DML += dcn20/dcn20_fpu.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index d3ba65efe1d2..38d1f2be8cf3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -938,7 +938,7 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
- if (link_is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
+ if (dc->link_srv->dp_is_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
return true;
}
return false;
@@ -973,7 +973,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
struct dc_link *link = context->streams[0]->sink->link;
struct dc_stream_status *stream_status = &context->stream_status[0];
- bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > 1000.0;
+ int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
+ bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
bool is_pwrseq0 = link->link_index == 0;
if (dc_extended_blank_supported(dc)) {
@@ -1340,7 +1341,7 @@ int dcn20_populate_dml_pipes_from_context(
case SIGNAL_TYPE_DISPLAY_PORT_MST:
case SIGNAL_TYPE_DISPLAY_PORT:
pipes[pipe_cnt].dout.output_type = dm_dp;
- if (link_is_dp_128b_132b_signal(&res_ctx->pipe_ctx[i]))
+ if (dc->link_srv->dp_is_128b_132b_signal(&res_ctx->pipe_ctx[i]))
pipes[pipe_cnt].dout.output_type = dm_dp2p0;
break;
case SIGNAL_TYPE_EDP:
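The decide_zstate_support() hunk above makes the Z8 residency threshold configurable: the stutter period must exceed dc->debug.minimum_z8_residency_time when that debug value is set, falling back to the previous hard-coded 1000 us otherwise. A small sketch of that decision with placeholder inputs:

#include <stdbool.h>
#include <stdio.h>

/*
 * Decide whether Z8 may be allowed for a single eDP display.
 * 'stutter_period_us' comes from the DML results; 'debug_min_us' is the
 * optional debug override (0 means "use the 1000 us default").
 */
static bool allow_z8(double stutter_period_us, int debug_min_us)
{
	int min_residency_us = debug_min_us > 0 ? debug_min_us : 1000;

	return stutter_period_us > (double)min_residency_us;
}

int main(void)
{
	printf("%d\n", allow_z8(1200.0, 0));	/* 1: above the default threshold */
	printf("%d\n", allow_z8(1200.0, 2000));	/* 0: below the debug override    */
	return 0;
}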
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index c3d75e56410c..d0303173ce80 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -23,9 +23,7 @@
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DCN
#include "dc.h"
-#include "dc_link.h"
#include "../display_mode_lib.h"
#include "display_mode_vba_30.h"
#include "../dml_inline_defs.h"
@@ -6635,4 +6633,3 @@ static noinline_for_stack void UseMinimumDCFCLK(
}
}
-#endif /* CONFIG_DRM_AMD_DC_DCN */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index 8179be1f34bb..cd3cfcb2a2b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -23,8 +23,6 @@
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DCN
-
#include "../display_mode_lib.h"
#include "../display_mode_vba.h"
#include "../dml_inline_defs.h"
@@ -1792,4 +1790,3 @@ void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 27f488405335..536a63624595 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -24,7 +24,6 @@
*/
#include "dc.h"
-#include "dc_link.h"
#include "../display_mode_lib.h"
#include "../dcn30/display_mode_vba_30.h"
#include "display_mode_vba_31.h"
@@ -4308,11 +4307,11 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
- } else if (v->Output[k] == dm_dp || v->Output[k] == dm_edp) {
+ } else if (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_dp2p0) {
if (v->DSCEnable[k] == true) {
v->RequiresDSC[i][k] = true;
v->LinkDSCEnable = true;
- if (v->Output[k] == dm_dp) {
+ if (v->Output[k] == dm_dp || v->Output[k] == dm_dp2p0) {
v->RequiresFEC[i][k] = true;
} else {
v->RequiresFEC[i][k] = false;
@@ -4320,107 +4319,201 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
} else {
v->RequiresDSC[i][k] = false;
v->LinkDSCEnable = false;
- v->RequiresFEC[i][k] = false;
- }
-
- v->Outbpp = BPP_INVALID;
- if (v->PHYCLKPerState[i] >= 270.0) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 2700,
- v->OutputLinkDPLanes[k],
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- // TODO: Need some other way to handle this nonsense
- // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR"
- }
- if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 540.0) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 5400,
- v->OutputLinkDPLanes[k],
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- // TODO: Need some other way to handle this nonsense
- // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR2"
- }
- if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 810.0) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 8100,
- v->OutputLinkDPLanes[k],
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- // TODO: Need some other way to handle this nonsense
- // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR3"
- }
- if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[i] >= 10000.0 / 18) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 10000,
- 4,
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- //v->OutputTypeAndRatePerState[i][k] = v->Output[k] & "10x4";
+ if (v->Output[k] == dm_dp2p0) {
+ v->RequiresFEC[i][k] = true;
+ } else {
+ v->RequiresFEC[i][k] = false;
+ }
}
- if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[i] >= 12000.0 / 18) {
- v->Outbpp = TruncToValidBPP(
- 12000,
- 4,
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- //v->OutputTypeAndRatePerState[i][k] = v->Output[k] & "12x4";
+ if (v->Output[k] == dm_dp2p0) {
+ v->Outbpp = BPP_INVALID;
+ if ((v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr10) &&
+ v->PHYCLKD18PerState[k] >= 10000.0 / 18.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 10000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[k] < 13500.0 / 18.0 &&
+ v->DSCEnable[k] == true && v->ForcedOutputLinkBPP[k] == 0) {
+ v->RequiresDSC[i][k] = true;
+ v->LinkDSCEnable = true;
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 10000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ }
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR10"
+ }
+ if (v->Outbpp == BPP_INVALID &&
+ (v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr13p5) &&
+ v->PHYCLKD18PerState[k] >= 13500.0 / 18.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 13500,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[k] < 20000.0 / 18.0 &&
+ v->DSCEnable[k] == true && v->ForcedOutputLinkBPP[k] == 0) {
+ v->RequiresDSC[i][k] = true;
+ v->LinkDSCEnable = true;
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 13500,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ }
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR13p5"
+ }
+ if (v->Outbpp == BPP_INVALID &&
+ (v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr20) &&
+ v->PHYCLKD18PerState[k] >= 20000.0 / 18.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 20000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ if (v->Outbpp == BPP_INVALID && v->DSCEnable[k] == true &&
+ v->ForcedOutputLinkBPP[k] == 0) {
+ v->RequiresDSC[i][k] = true;
+ v->LinkDSCEnable = true;
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 20000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ }
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR20"
+ }
+ } else {
+ v->Outbpp = BPP_INVALID;
+ if (v->PHYCLKPerState[i] >= 270.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 2700,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR"
+ }
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 540.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 5400,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR2"
+ }
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 810.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 8100,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR3"
+ }
}
}
} else {
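The DP 2.0 branch added above (and repeated for dml314 further down) walks a UHBR rate ladder: UHBR10, then UHBR13.5, then UHBR20, accepting the first rate that yields a valid output bpp, and only forcing DSC on at a given rate when the next-higher rate is not reachable with the available PHYCLKD18. The sketch below restructures that ladder as a loop over the three link rates; TruncToValidBPP and the surrounding mode-support state are replaced by a hypothetical try_bpp() callback, BPP_INVALID is assumed to be 0, and the per-stream forced link rate filter (OutputLinkDPRate) is omitted, so this illustrates the selection order rather than the driver's implementation.

#include <stdbool.h>

#define BPP_INVALID 0	/* assumed value for this sketch */

/* Candidate DP 2.0 link rates, in the same units as the hunk above. */
static const double uhbr_rates[] = { 10000.0, 13500.0, 20000.0 };

/*
 * Walk the UHBR ladder. 'phyclk_d18' plays the role of PHYCLKD18PerState for
 * the state being evaluated; 'try_bpp(rate, dsc)' is a hypothetical helper
 * standing in for TruncToValidBPP with and without DSC; 'dsc_possible' means
 * DSC is enabled for the stream and no output link bpp is forced.
 */
static double pick_uhbr_bpp(double phyclk_d18, bool dsc_possible,
			    double (*try_bpp)(double rate, bool dsc),
			    bool *requires_dsc)
{
	double outbpp = BPP_INVALID;
	int i;

	*requires_dsc = false;

	for (i = 0; i < 3 && outbpp == BPP_INVALID; i++) {
		bool last_reachable = (i == 2) || (phyclk_d18 < uhbr_rates[i + 1] / 18.0);

		if (phyclk_d18 < uhbr_rates[i] / 18.0)
			continue;	/* this rate is not reachable at all */

		outbpp = try_bpp(uhbr_rates[i], false);

		/* Only force DSC when no higher rate could be tried instead. */
		if (outbpp == BPP_INVALID && last_reachable && dsc_possible) {
			*requires_dsc = true;
			outbpp = try_bpp(uhbr_rates[i], true);
		}
	}

	return outbpp;
}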
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index 35d10b4d018b..2244e4fb8c96 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -902,7 +902,6 @@ static void dml_rq_dlg_get_dlg_params(
double hratio_c;
double vratio_l;
double vratio_c;
- bool scl_enable;
unsigned int swath_width_ub_l;
unsigned int dpte_groups_per_row_ub_l;
@@ -1020,7 +1019,6 @@ static void dml_rq_dlg_get_dlg_params(
hratio_c = scl->hscl_ratio_c;
vratio_l = scl->vscl_ratio;
vratio_c = scl->vscl_ratio_c;
- scl_enable = scl->scl_enable;
swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub;
dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index acda3e1babd4..c52b76610bd2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -308,6 +308,10 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width))
upscaled = true;
+ /* Apply HostVM policy - either based on hypervisor globally enabled, or rIOMMU active */
+ if (dc->debug.dml_hostvm_override == DML_HOSTVM_NO_OVERRIDE)
+ pipes[i].pipe.src.hostvm = dc->vm_pa_config.is_hvm_enabled || dc->res_pool->hubbub->riommu_active;
+
/*
* Immediate flip can be set dynamically after enabling the plane.
* We need to require support for immediate flip or underflow can be
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index c843b394aeb4..daf319370190 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -27,7 +27,6 @@
#define UNIT_TEST 0
#if !UNIT_TEST
#include "dc.h"
-#include "dc_link.h"
#endif
#include "../display_mode_lib.h"
#include "display_mode_vba_314.h"
@@ -4406,11 +4405,11 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
- } else if (v->Output[k] == dm_dp || v->Output[k] == dm_edp) {
+ } else if (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_dp2p0) {
if (v->DSCEnable[k] == true) {
v->RequiresDSC[i][k] = true;
v->LinkDSCEnable = true;
- if (v->Output[k] == dm_dp) {
+ if (v->Output[k] == dm_dp || v->Output[k] == dm_dp2p0) {
v->RequiresFEC[i][k] = true;
} else {
v->RequiresFEC[i][k] = false;
@@ -4418,107 +4417,201 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
} else {
v->RequiresDSC[i][k] = false;
v->LinkDSCEnable = false;
- v->RequiresFEC[i][k] = false;
- }
-
- v->Outbpp = BPP_INVALID;
- if (v->PHYCLKPerState[i] >= 270.0) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 2700,
- v->OutputLinkDPLanes[k],
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- // TODO: Need some other way to handle this nonsense
- // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR"
- }
- if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 540.0) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 5400,
- v->OutputLinkDPLanes[k],
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- // TODO: Need some other way to handle this nonsense
- // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR2"
- }
- if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 810.0) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 8100,
- v->OutputLinkDPLanes[k],
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- // TODO: Need some other way to handle this nonsense
- // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR3"
- }
- if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[i] >= 10000.0 / 18) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 10000,
- 4,
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- //v->OutputTypeAndRatePerState[i][k] = v->Output[k] & "10x4";
+ if (v->Output[k] == dm_dp2p0) {
+ v->RequiresFEC[i][k] = true;
+ } else {
+ v->RequiresFEC[i][k] = false;
+ }
}
- if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[i] >= 12000.0 / 18) {
- v->Outbpp = TruncToValidBPP(
- 12000,
- 4,
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- //v->OutputTypeAndRatePerState[i][k] = v->Output[k] & "12x4";
+ if (v->Output[k] == dm_dp2p0) {
+ v->Outbpp = BPP_INVALID;
+ if ((v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr10) &&
+ v->PHYCLKD18PerState[k] >= 10000.0 / 18.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 10000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[k] < 13500.0 / 18.0 &&
+ v->DSCEnable[k] == true && v->ForcedOutputLinkBPP[k] == 0) {
+ v->RequiresDSC[i][k] = true;
+ v->LinkDSCEnable = true;
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 10000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ }
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR10"
+ }
+ if (v->Outbpp == BPP_INVALID &&
+ (v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr13p5) &&
+ v->PHYCLKD18PerState[k] >= 13500.0 / 18.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 13500,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[k] < 20000.0 / 18.0 &&
+ v->DSCEnable[k] == true && v->ForcedOutputLinkBPP[k] == 0) {
+ v->RequiresDSC[i][k] = true;
+ v->LinkDSCEnable = true;
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 13500,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ }
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR13p5"
+ }
+ if (v->Outbpp == BPP_INVALID &&
+ (v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr20) &&
+ v->PHYCLKD18PerState[k] >= 20000.0 / 18.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 20000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ if (v->Outbpp == BPP_INVALID && v->DSCEnable[k] == true &&
+ v->ForcedOutputLinkBPP[k] == 0) {
+ v->RequiresDSC[i][k] = true;
+ v->LinkDSCEnable = true;
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 20000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ }
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR20"
+ }
+ } else {
+ v->Outbpp = BPP_INVALID;
+ if (v->PHYCLKPerState[i] >= 270.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 2700,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR"
+ }
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 540.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 5400,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR2"
+ }
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 810.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 8100,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR3"
+ }
}
}
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index e47828e3b6d5..6b29d3a9520f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -1270,7 +1270,7 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
- if (link_is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
+ if (dc->link_srv->dp_is_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
return true;
}
return false;
@@ -2315,6 +2315,9 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
num_dcfclk_dpms++;
}
+ if (num_dcfclk_dpms > 0 && bw_params->clk_table.entries[0].fclk_mhz > min_fclk_mhz)
+ min_fclk_mhz = bw_params->clk_table.entries[0].fclk_mhz;
+
if (!max_dcfclk_mhz || !max_dispclk_mhz || !max_dtbclk_mhz)
return -1;
@@ -2423,7 +2426,6 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
for (i = *num_entries - 1; i >= 0 ; i--) {
if (table[i].fabricclk_mhz < min_fclk_mhz) {
table[i].fabricclk_mhz = min_fclk_mhz;
- break;
}
}
}
@@ -2432,7 +2434,6 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
for (i = *num_entries - 1; i >= 0 ; i--) {
if (table[i].dcfclk_mhz < min_dcfclk_mhz) {
table[i].dcfclk_mhz = min_dcfclk_mhz;
- break;
}
}
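Two related fixes appear in build_synthetic_soc_states() above (and again in the dcn321 copy further down): min_fclk_mhz is raised to the first clock-table entry when DCFCLK DPMs exist, and the early 'break' is dropped so every synthetic state below the minimum gets clamped rather than only the first one visited. A small sketch of the clamping loops after the change:

#include <stdio.h>

struct soc_state {
	unsigned int fabricclk_mhz;
	unsigned int dcfclk_mhz;
};

/* Clamp every state below the minimums; with the 'break' removed this now
 * touches all offending entries instead of stopping at the first match. */
static void clamp_soc_states(struct soc_state *table, int num_entries,
			     unsigned int min_fclk_mhz, unsigned int min_dcfclk_mhz)
{
	int i;

	for (i = num_entries - 1; i >= 0; i--)
		if (table[i].fabricclk_mhz < min_fclk_mhz)
			table[i].fabricclk_mhz = min_fclk_mhz;

	for (i = num_entries - 1; i >= 0; i--)
		if (table[i].dcfclk_mhz < min_dcfclk_mhz)
			table[i].dcfclk_mhz = min_dcfclk_mhz;
}

int main(void)
{
	struct soc_state t[3] = { { 300, 200 }, { 600, 400 }, { 1200, 800 } };

	clamp_soc_states(t, 3, 500, 300);
	printf("%u %u %u\n", t[0].fabricclk_mhz, t[1].fabricclk_mhz, t[2].fabricclk_mhz);
	/* prints: 500 600 1200 */
	return 0;
}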
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 3b2a014ccf8f..f74730c2abbd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -24,7 +24,6 @@
*/
#include "dc.h"
-#include "dc_link.h"
#include "../display_mode_lib.h"
#include "display_mode_vba_32.h"
#include "../dml_inline_defs.h"
@@ -690,7 +689,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
mode_lib->vba.PixelClock,
mode_lib->vba.VRatio,
mode_lib->vba.VRatioChroma,
- mode_lib->vba.UsesMALLForPStateChange);
+ mode_lib->vba.UsesMALLForPStateChange,
+ mode_lib->vba.UseUnboundedRequesting);
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
v->MaxVStartupLines[k] = ((mode_lib->vba.Interlace[k] &&
@@ -3216,7 +3216,8 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.PixelClock,
mode_lib->vba.VRatio,
mode_lib->vba.VRatioChroma,
- mode_lib->vba.UsesMALLForPStateChange);
+ mode_lib->vba.UsesMALLForPStateChange,
+ mode_lib->vba.UseUnboundedRequesting);
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.VMDataOnlyReturnBWPerState = dml32_get_return_bw_mbps_vm_only(&mode_lib->vba.soc, i,
mode_lib->vba.DCFCLKState[i][j], mode_lib->vba.FabricClockPerState[i],
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index d1000aa4c481..61cc4904ade4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -6271,7 +6271,8 @@ bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurface
double PixelClock[],
double VRatioY[],
double VRatioC[],
- enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[])
+ enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[],
+ enum unbounded_requesting_policy UseUnboundedRequesting)
{
int k;
double SwathSizeAllSurfaces = 0;
@@ -6283,6 +6284,9 @@ bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurface
double SwathSizePerSurfaceC[DC__NUM_DPP__MAX];
bool NotEnoughDETSwathFillLatencyHiding = false;
+ if (UseUnboundedRequesting == dm_unbounded_requesting)
+ return false;
+
/* calculate sum of single swath size for all pipes in bytes */
for (k = 0; k < NumberOfActiveSurfaces; k++) {
SwathSizePerSurfaceY[k] = SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k];
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
index 9ba792c633a5..592d174df6c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
@@ -1163,6 +1163,7 @@ bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurface
double PixelClock[],
double VRatioY[],
double VRatioC[],
- enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[]);
+ enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[],
+ enum unbounded_requesting_policy UseUnboundedRequesting);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index b80cef70fa60..57b9bd896678 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -294,6 +294,9 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
num_dcfclk_dpms++;
}
+ if (num_dcfclk_dpms > 0 && bw_params->clk_table.entries[0].fclk_mhz > min_fclk_mhz)
+ min_fclk_mhz = bw_params->clk_table.entries[0].fclk_mhz;
+
if (!max_dcfclk_mhz || !max_dispclk_mhz || !max_dtbclk_mhz)
return -1;
@@ -402,7 +405,6 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
for (i = *num_entries - 1; i >= 0 ; i--) {
if (table[i].fabricclk_mhz < min_fclk_mhz) {
table[i].fabricclk_mhz = min_fclk_mhz;
- break;
}
}
}
@@ -411,7 +413,6 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
for (i = *num_entries - 1; i >= 0 ; i--) {
if (table[i].dcfclk_mhz < min_dcfclk_mhz) {
table[i].dcfclk_mhz = min_dcfclk_mhz;
- break;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index d52cbc0e9b67..2bdc47615543 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -47,6 +47,59 @@ static bool dsc_policy_disable_dsc_stream_overhead;
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif
+uint32_t dc_bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t bits_per_channel = 0;
+ uint32_t kbps;
+
+ if (timing->flags.DSC)
+ return dc_dsc_stream_bandwidth_in_kbps(timing,
+ timing->dsc_cfg.bits_per_pixel,
+ timing->dsc_cfg.num_slices_h,
+ timing->dsc_cfg.is_dp);
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ bits_per_channel = 6;
+ break;
+ case COLOR_DEPTH_888:
+ bits_per_channel = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bits_per_channel = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bits_per_channel = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bits_per_channel = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bits_per_channel = 16;
+ break;
+ default:
+ ASSERT(bits_per_channel != 0);
+ bits_per_channel = 8;
+ break;
+ }
+
+ kbps = timing->pix_clk_100hz / 10;
+ kbps *= bits_per_channel;
+
+ if (timing->flags.Y_ONLY != 1) {
+ /* Only Y-Only reduces bandwidth to 1/3 compared to RGB */
+ kbps *= 3;
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ kbps /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ kbps = kbps * 2 / 3;
+ }
+
+ return kbps;
+}
+
+
/* Forward Declarations */
static bool decide_dsc_bandwidth_range(
const uint32_t min_bpp_x16,
@@ -79,8 +132,7 @@ static bool setup_dsc_config(
const struct dsc_enc_caps *dsc_enc_caps,
int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
- int min_slice_height_override,
- int max_dsc_target_bpp_limit_override_x16,
+ const struct dc_dsc_config_options *options,
struct dc_dsc_config *dsc_cfg);
static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size)
@@ -352,6 +404,11 @@ bool dc_dsc_compute_bandwidth_range(
struct dsc_enc_caps dsc_enc_caps;
struct dsc_enc_caps dsc_common_caps;
struct dc_dsc_config config;
+ struct dc_dsc_config_options options = {0};
+
+ options.dsc_min_slice_height_override = dsc_min_slice_height_override;
+ options.max_target_bpp_limit_override_x16 = max_bpp_x16;
+ options.slice_height_granularity = 1;
get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
@@ -360,7 +417,7 @@ bool dc_dsc_compute_bandwidth_range(
if (is_dsc_possible)
is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing,
- dsc_min_slice_height_override, max_bpp_x16, &config);
+ &options, &config);
if (is_dsc_possible)
is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16,
@@ -740,8 +797,7 @@ static bool setup_dsc_config(
const struct dsc_enc_caps *dsc_enc_caps,
int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
- int min_slice_height_override,
- int max_dsc_target_bpp_limit_override_x16,
+ const struct dc_dsc_config_options *options,
struct dc_dsc_config *dsc_cfg)
{
struct dsc_enc_caps dsc_common_caps;
@@ -760,7 +816,7 @@ static bool setup_dsc_config(
memset(dsc_cfg, 0, sizeof(struct dc_dsc_config));
- dc_dsc_get_policy_for_timing(timing, max_dsc_target_bpp_limit_override_x16, &policy);
+ dc_dsc_get_policy_for_timing(timing, options->max_target_bpp_limit_override_x16, &policy);
pic_width = timing->h_addressable + timing->h_border_left + timing->h_border_right;
pic_height = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
@@ -909,12 +965,13 @@ static bool setup_dsc_config(
// Slice height (i.e. number of slices per column): start with policy and pick the first one that height is divisible by.
// For 4:2:0 make sure the slice height is divisible by 2 as well.
- if (min_slice_height_override == 0)
+ if (options->dsc_min_slice_height_override == 0)
slice_height = min(policy.min_slice_height, pic_height);
else
- slice_height = min(min_slice_height_override, pic_height);
+ slice_height = min((int)(options->dsc_min_slice_height_override), pic_height);
while (slice_height < pic_height && (pic_height % slice_height != 0 ||
+ slice_height % options->slice_height_granularity != 0 ||
(timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 && slice_height % 2 != 0)))
slice_height++;
@@ -958,8 +1015,7 @@ done:
bool dc_dsc_compute_config(
const struct display_stream_compressor *dsc,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
- uint32_t dsc_min_slice_height_override,
- uint32_t max_target_bpp_limit_override,
+ const struct dc_dsc_config_options *options,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
struct dc_dsc_config *dsc_cfg)
@@ -971,8 +1027,7 @@ bool dc_dsc_compute_config(
is_dsc_possible = setup_dsc_config(dsc_sink_caps,
&dsc_enc_caps,
target_bandwidth_kbps,
- timing, dsc_min_slice_height_override,
- max_target_bpp_limit_override * 16, dsc_cfg);
+ timing, options, dsc_cfg);
return is_dsc_possible;
}
@@ -1104,3 +1159,10 @@ void dc_dsc_policy_set_disable_dsc_stream_overhead(bool disable)
{
dsc_policy_disable_dsc_stream_overhead = disable;
}
+
+void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_options *options)
+{
+ options->dsc_min_slice_height_override = dc->debug.dsc_min_slice_height_override;
+ options->max_target_bpp_limit_override_x16 = 0;
+ options->slice_height_granularity = 1;
+}
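With the refactor above, callers no longer pass the slice-height and bpp-limit overrides as separate arguments; they fill a struct dc_dsc_config_options, typically seeded from dc_dsc_get_default_config_option(). A hedged usage sketch follows; the caller context is hypothetical and assumes the usual dc/dc_dsc headers are included, but the function names and their argument order are the ones introduced in this hunk.

/*
 * Illustrative caller of the options-based DSC API introduced above.
 * 'dc', 'dsc', 'sink_caps' and 'timing' are assumed to come from the
 * stream being enabled; 'available_link_kbps' is the bandwidth budget
 * the caller has already determined for the link.
 */
static bool compute_dsc_cfg_for_stream(const struct dc *dc,
				       const struct display_stream_compressor *dsc,
				       const struct dsc_dec_dpcd_caps *sink_caps,
				       const struct dc_crtc_timing *timing,
				       uint32_t available_link_kbps,
				       struct dc_dsc_config *dsc_cfg)
{
	struct dc_dsc_config_options options = {0};

	/* Defaults: debug slice-height override, no bpp limit override,
	 * slice height granularity of 1 (see the hunk above). */
	dc_dsc_get_default_config_option(dc, &options);

	/* Only bother with DSC if the uncompressed timing does not fit. */
	if (dc_bandwidth_in_kbps_from_timing(timing) <= available_link_kbps)
		return false;

	return dc_dsc_compute_config(dsc, sink_caps, &options,
				     available_link_kbps, timing, dsc_cfg);
}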
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
index e97cf09be9d5..64cee8c80110 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
@@ -39,6 +39,7 @@
*/
void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps)
{
+#if defined(CONFIG_DRM_AMD_DC_FP)
enum colour_mode mode;
enum bits_per_comp bpc;
bool is_navite_422_or_420;
@@ -59,4 +60,5 @@ void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps)
slice_width, slice_height,
pps->dsc_version_minor);
DC_FP_END();
+#endif
}
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
index e1422e5e86c9..25ffc052d53b 100644
--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
@@ -27,7 +27,7 @@
#include "dm_services.h"
#include "dm_helpers.h"
-#include "include/hdcp_types.h"
+#include "include/hdcp_msg_types.h"
#include "include/signal_types.h"
#include "core_types.h"
#include "link.h"
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index ed3c03108da6..2eb597a24425 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -51,9 +51,7 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
#include "clock_source.h"
#include "audio.h"
#include "dm_pp_smu.h"
-#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "dm_cp_psp.h"
-#endif
#include "link_hwss.h"
/********** DAL Core*********************/
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 131fcfa28bca..f4aa76e02518 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -70,28 +70,38 @@ struct dpp_input_csc_matrix {
};
static const struct dpp_input_csc_matrix __maybe_unused dpp_input_csc_matrix[] = {
- {COLOR_SPACE_SRGB,
- {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
- {COLOR_SPACE_SRGB_LIMITED,
- {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
- {COLOR_SPACE_YCBCR601,
- {0x2cdd, 0x2000, 0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef,
- 0, 0x2000, 0x38b4, 0xe3a6} },
- {COLOR_SPACE_YCBCR601_LIMITED,
- {0x3353, 0x2568, 0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108,
- 0, 0x2568, 0x40de, 0xdd3a} },
- {COLOR_SPACE_YCBCR709,
- {0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0,
- 0x2000, 0x3b61, 0xe24f} },
- {COLOR_SPACE_YCBCR709_LIMITED,
- {0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0,
- 0x2568, 0x43ee, 0xdbb2} },
- {COLOR_SPACE_2020_YCBCR,
- {0x2F30, 0x2000, 0, 0xE869, 0xEDB7, 0x2000, 0xFABC, 0xBC6, 0,
- 0x2000, 0x3C34, 0xE1E6} },
- {COLOR_SPACE_2020_RGB_LIMITEDRANGE,
- {0x35E0, 0x255F, 0, 0xE2B3, 0xEB20, 0x255F, 0xF9FD, 0xB1E, 0,
- 0x255F, 0x44BD, 0xDB43} }
+ { COLOR_SPACE_SRGB,
+ { 0x2000, 0, 0, 0,
+ 0, 0x2000, 0, 0,
+ 0, 0, 0x2000, 0 } },
+ { COLOR_SPACE_SRGB_LIMITED,
+ { 0x2000, 0, 0, 0,
+ 0, 0x2000, 0, 0,
+ 0, 0, 0x2000, 0 } },
+ { COLOR_SPACE_YCBCR601,
+ { 0x2cdd, 0x2000, 0, 0xe991,
+ 0xe926, 0x2000, 0xf4fd, 0x10ef,
+ 0, 0x2000, 0x38b4, 0xe3a6 } },
+ { COLOR_SPACE_YCBCR601_LIMITED,
+ { 0x3353, 0x2568, 0, 0xe400,
+ 0xe5dc, 0x2568, 0xf367, 0x1108,
+ 0, 0x2568, 0x40de, 0xdd3a } },
+ { COLOR_SPACE_YCBCR709,
+ { 0x3265, 0x2000, 0, 0xe6ce,
+ 0xf105, 0x2000, 0xfa01, 0xa7d,
+ 0, 0x2000, 0x3b61, 0xe24f } },
+ { COLOR_SPACE_YCBCR709_LIMITED,
+ { 0x39a6, 0x2568, 0, 0xe0d6,
+ 0xeedd, 0x2568, 0xf925, 0x9a8,
+ 0, 0x2568, 0x43ee, 0xdbb2 } },
+ { COLOR_SPACE_2020_YCBCR,
+ { 0x2F30, 0x2000, 0, 0xE869,
+ 0xEDB7, 0x2000, 0xFABC, 0xBC6,
+ 0, 0x2000, 0x3C34, 0xE1E6 } },
+ { COLOR_SPACE_2020_RGB_LIMITEDRANGE,
+ { 0x35E0, 0x255F, 0, 0xE2B3,
+ 0xEB20, 0x255F, 0xF9FD, 0xB1E,
+ 0, 0x255F, 0x44BD, 0xDB43 } }
};
struct dpp_grph_csc_adjustment {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
index b982be64c792..86b711dcc785 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
@@ -53,9 +53,7 @@ enum dwb_source {
/* DCN1.x, DCN2.x support 2 pipes */
enum dwb_pipe {
dwb_pipe0 = 0,
-#if defined(CONFIG_DRM_AMD_DC_DCN)
dwb_pipe1,
-#endif
dwb_pipe_max_num,
};
@@ -72,14 +70,11 @@ enum wbscl_coef_filter_type_sel {
};
-#if defined(CONFIG_DRM_AMD_DC_DCN)
enum dwb_boundary_mode {
DWBSCL_BOUNDARY_MODE_EDGE = 0,
DWBSCL_BOUNDARY_MODE_BLACK = 1
};
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN)
enum dwb_output_csc_mode {
DWB_OUTPUT_CSC_DISABLE = 0,
DWB_OUTPUT_CSC_COEF_A = 1,
@@ -132,7 +127,6 @@ struct dwb_efc_display_settings {
unsigned int dwbOutputBlack; // 0 - Normal, 1 - Output Black
};
-#endif
struct dwb_warmup_params {
bool warmup_en; /* false: normal mode, true: enable pattern generator */
bool warmup_mode; /* false: 420, true: 444 */
@@ -208,7 +202,7 @@ struct dwbc_funcs {
struct dwb_warmup_params *warmup_params);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
void (*dwb_program_output_csc)(
struct dwbc *dwbc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index d5ea7545583e..b5d353c41aa9 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -146,7 +146,7 @@ struct hubp_funcs {
void (*set_blank)(struct hubp *hubp, bool blank);
void (*set_blank_regs)(struct hubp *hubp, bool blank);
-#ifdef CONFIG_DRM_AMD_DC_DCN
+#ifdef CONFIG_DRM_AMD_DC_FP
void (*phantom_hubp_post_enable)(struct hubp *hubp);
#endif
void (*set_hubp_blank_en)(struct hubp *hubp, bool blank);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index a819f0f97c5f..b95ae9596c3b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -275,20 +275,6 @@ enum dc_lut_mode {
LUT_RAM_B
};
-enum symclk_state {
- SYMCLK_OFF_TX_OFF,
- SYMCLK_ON_TX_ON,
- SYMCLK_ON_TX_OFF,
-};
-
-struct phy_state {
- struct {
- uint8_t otg : 1;
- uint8_t reserved : 7;
- } symclk_ref_cnts;
- enum symclk_state symclk_state;
-};
-
/**
* speakersToChannels
*
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index ec572a9e4054..dbe7afa9d3a2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -75,58 +75,6 @@ struct encoder_feature_support {
bool fec_supported;
};
-union dpcd_psr_configuration {
- struct {
- unsigned char ENABLE : 1;
- unsigned char TRANSMITTER_ACTIVE_IN_PSR : 1;
- unsigned char CRC_VERIFICATION : 1;
- unsigned char FRAME_CAPTURE_INDICATION : 1;
- /* For eDP 1.4, PSR v2*/
- unsigned char LINE_CAPTURE_INDICATION : 1;
- /* For eDP 1.4, PSR v2*/
- unsigned char IRQ_HPD_WITH_CRC_ERROR : 1;
- unsigned char ENABLE_PSR2 : 1;
- /* For eDP 1.5, PSR v2 w/ early transport */
- unsigned char EARLY_TRANSPORT_ENABLE : 1;
- } bits;
- unsigned char raw;
-};
-
-union dpcd_alpm_configuration {
- struct {
- unsigned char ENABLE : 1;
- unsigned char IRQ_HPD_ENABLE : 1;
- unsigned char RESERVED : 6;
- } bits;
- unsigned char raw;
-};
-
-union dpcd_sink_active_vtotal_control_mode {
- struct {
- unsigned char ENABLE : 1;
- unsigned char RESERVED : 7;
- } bits;
- unsigned char raw;
-};
-
-union psr_error_status {
- struct {
- unsigned char LINK_CRC_ERROR :1;
- unsigned char RFB_STORAGE_ERROR :1;
- unsigned char VSC_SDP_ERROR :1;
- unsigned char RESERVED :5;
- } bits;
- unsigned char raw;
-};
-
-union psr_sink_psr_status {
- struct {
- unsigned char SINK_SELF_REFRESH_STATUS :3;
- unsigned char RESERVED :5;
- } bits;
- unsigned char raw;
-};
-
struct link_encoder {
const struct link_encoder_funcs *funcs;
int32_t aux_channel_offset;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index bb5ad70d4266..c4fbbf08ef86 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -30,7 +30,6 @@
#include "audio_types.h"
#include "hw_shared.h"
-#include "dc_link.h"
struct dc_bios;
struct dc_context;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 1d9f9c53d2bd..c21e7ffd5bd0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -182,7 +182,7 @@ struct timing_generator_funcs {
bool (*enable_crtc)(struct timing_generator *tg);
bool (*disable_crtc)(struct timing_generator *tg);
-#ifdef CONFIG_DRM_AMD_DC_DCN
+#ifdef CONFIG_DRM_AMD_DC_FP
void (*phantom_crtc_post_enable)(struct timing_generator *tg);
#endif
void (*disable_phantom_crtc)(struct timing_generator *tg);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index a4d61bb724b6..45d37c584551 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -148,7 +148,7 @@ struct hwseq_private_funcs {
void (*PLAT_58856_wa)(struct dc_state *context,
struct pipe_ctx *pipe_ctx);
void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable);
-#ifdef CONFIG_DRM_AMD_DC_DCN
+#ifdef CONFIG_DRM_AMD_DC_FP
void (*program_mall_pipe_config)(struct dc *dc, struct dc_state *context);
void (*subvp_update_force_pstate)(struct dc *dc, struct dc_state *context);
void (*update_mall_sel)(struct dc *dc, struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index e70fa0059223..11aaa7a9518a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -28,17 +28,57 @@
/* FILE POLICY AND INTENDED USAGE:
*
- * This header declares link functions exposed to dc. All functions must have
- * "link_" as prefix. For example link_run_my_function. This header is strictly
- * private in dc and should never be included in other header files. dc
- * components should include this header in their .c files in order to access
- * functions in link folder. This file should never include any header files in
- * link folder. If there is a need to expose a function declared in one of
- * header files in side link folder, you need to move the function declaration
- * into this file and prefix it with "link_".
+ * This header defines link component function interfaces aka link_service.
+ * link_service provides the only entry point to link functions with function
+ * pointer style. This header is strictly private in dc and should never be
+ * included by DM because it exposes too much dc detail including all dc
+ * private types defined in core_types.h. Otherwise it will break DM - DC
+ * encapsulation and turn DM into a maintenance nightmare.
+ *
+ * The following shows a link component relation map.
+ *
+ * DM to DC:
+ * DM includes dc.h
+ * dc_link_exports.c or other dc files implement dc.h
+ *
+ * DC to Link:
+ * dc_link_exports.c or other dc files include link.h
+ * link_factory.c implements link.h
+ *
+ * Link sub-component to Link sub-component:
+ * link_factory.c includes --> link_xxx.h
+ * link_xxx.c implements link_xxx.h
+ *
+ * As you can see, if you ever need to add a new dc link function and call it
+ * on the DM/dc side, it is very difficult because you will need layers of
+ * translation. The most appropriate approach to implement new requirements on
+ * the DM/dc side is to extend or generalize the functionality of existing link
+ * function interfaces so that minimal modification is needed outside the link
+ * component to achieve your new requirements. This approach reduces or even
+ * eliminates the effort needed outside the link component to support a new
+ * link feature. It also reduces code discrepancy among DMs supporting the same
+ * link feature. If we test the full code path on one version of DM and no
+ * feature-specific modification is required on other DMs, then we can have
+ * higher confidence that the feature will run on other DMs and produce the
+ * same result. The following are some good examples to start with:
+ *
+ * - detect_link --> to add new link detection or capability retrieval routines
+ *
+ * - validate_mode_timing --> to add new timing validation conditions
+ *
+ * - set_dpms_on/set_dpms_off --> to include new link enablement sequences
+ *
+ * If you must add new link functions, you will need to:
+ * 1. Declare the function pointer here under the suitable commented category.
+ * 2. Implement your function in the suitable link_xxx.c file.
+ * 3. Assign the function to link_service in link_factory.c.
+ * 4. NEVER include link_xxx.h headers outside the link component.
+ * 5. NEVER include link.h on the DM side.
*/
#include "core_types.h"
-#include "dc_link.h"
+
+struct link_service *link_create_link_service(void);
+void link_destroy_link_service(struct link_service **link_srv);
struct link_init_data {
const struct dc *dc;
@@ -49,14 +89,6 @@ struct link_init_data {
bool is_dpia_link;
};
-struct dc_link *link_create(const struct link_init_data *init_params);
-void link_destroy(struct dc_link **link);
-
-// TODO - convert any function declarations below to function pointers
-struct gpio *link_get_hpd_gpio(struct dc_bios *dcb,
- struct graphics_object_id link_id,
- struct gpio_service *gpio_service);
-
struct ddc_service_init_data {
struct graphics_object_id id;
struct dc_context *ctx;
@@ -64,94 +96,221 @@ struct ddc_service_init_data {
bool is_dpia_link;
};
-struct ddc_service *link_create_ddc_service(
- struct ddc_service_init_data *ddc_init_data);
+struct link_service {
+ /************************** Factory ***********************************/
+ struct dc_link *(*create_link)(
+ const struct link_init_data *init_params);
+ void (*destroy_link)(struct dc_link **link);
-void link_destroy_ddc_service(struct ddc_service **ddc);
-bool link_is_in_aux_transaction_mode(struct ddc_service *ddc);
+ /************************** Detection *********************************/
+ bool (*detect_link)(struct dc_link *link, enum dc_detect_reason reason);
+ bool (*detect_connection_type)(struct dc_link *link,
+ enum dc_connection_type *type);
+ struct dc_sink *(*add_remote_sink)(
+ struct dc_link *link,
+ const uint8_t *edid,
+ int len,
+ struct dc_sink_init_data *init_data);
+ void (*remove_remote_sink)(struct dc_link *link, struct dc_sink *sink);
+ bool (*get_hpd_state)(struct dc_link *link);
+ struct gpio *(*get_hpd_gpio)(struct dc_bios *dcb,
+ struct graphics_object_id link_id,
+ struct gpio_service *gpio_service);
+ void (*enable_hpd)(const struct dc_link *link);
+ void (*disable_hpd)(const struct dc_link *link);
+ void (*enable_hpd_filter)(struct dc_link *link, bool enable);
+ bool (*reset_cur_dp_mst_topology)(struct dc_link *link);
+ const struct dc_link_status *(*get_status)(const struct dc_link *link);
+ bool (*is_hdcp1x_supported)(struct dc_link *link,
+ enum signal_type signal);
+ bool (*is_hdcp2x_supported)(struct dc_link *link,
+ enum signal_type signal);
+ void (*clear_dprx_states)(struct dc_link *link);
-bool link_query_ddc_data(
- struct ddc_service *ddc,
- uint32_t address,
- uint8_t *write_buf,
- uint32_t write_size,
- uint8_t *read_buf,
- uint32_t read_size);
+ /*************************** Resource *********************************/
+ void (*get_cur_res_map)(const struct dc *dc, uint32_t *map);
+ void (*restore_res_map)(const struct dc *dc, uint32_t *map);
+ void (*get_cur_link_res)(const struct dc_link *link,
+ struct link_resource *link_res);
-/* Attempt to submit an aux payload, retrying on timeouts, defers, and busy
- * states as outlined in the DP spec. Returns true if the request was
- * successful.
- *
- * NOTE: The function requires explicit mutex on DM side in order to prevent
- * potential race condition. DC components should call the dpcd read/write
- * function in dm_helpers in order to access dpcd safely
- */
-bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc,
- struct aux_payload *payload);
-uint32_t link_get_aux_defer_delay(struct ddc_service *ddc);
+ /*************************** Validation *******************************/
+ enum dc_status (*validate_mode_timing)(
+ const struct dc_stream_state *stream,
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing);
+ uint32_t (*dp_link_bandwidth_kbps)(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_settings);
-bool link_is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx);
-enum dp_link_encoding link_dp_get_encoding_format(
- const struct dc_link_settings *link_settings);
+ /*************************** DPMS *************************************/
+ void (*set_dpms_on)(struct dc_state *state, struct pipe_ctx *pipe_ctx);
+ void (*set_dpms_off)(struct pipe_ctx *pipe_ctx);
+ void (*resume)(struct dc_link *link);
+ void (*blank_all_dp_displays)(struct dc *dc);
+ void (*blank_all_edp_displays)(struct dc *dc);
+ void (*blank_dp_stream)(struct dc_link *link, bool hw_init);
+ enum dc_status (*increase_mst_payload)(
+ struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
+ enum dc_status (*reduce_mst_payload)(
+ struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
+ void (*set_dsc_on_stream)(struct pipe_ctx *pipe_ctx, bool enable);
+ bool (*set_dsc_enable)(struct pipe_ctx *pipe_ctx, bool enable);
+ bool (*update_dsc_config)(struct pipe_ctx *pipe_ctx);
-bool link_decide_link_settings(
- struct dc_stream_state *stream,
- struct dc_link_settings *link_setting);
-
-void link_dp_trace_set_edp_power_timestamp(struct dc_link *link,
- bool power_up);
-uint64_t link_dp_trace_get_edp_poweron_timestamp(struct dc_link *link);
-uint64_t link_dp_trace_get_edp_poweroff_timestamp(struct dc_link *link);
-
-bool link_is_edp_ilr_optimization_required(struct dc_link *link,
- struct dc_crtc_timing *crtc_timing);
-
-bool link_backlight_enable_aux(struct dc_link *link, bool enable);
-void link_edp_add_delay_for_T9(struct dc_link *link);
-bool link_edp_receiver_ready_T9(struct dc_link *link);
-bool link_edp_receiver_ready_T7(struct dc_link *link);
-bool link_power_alpm_dpcd_enable(struct dc_link *link, bool enable);
-bool link_set_sink_vtotal_in_psr_active(const struct dc_link *link,
- uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su);
-void link_get_psr_residency(const struct dc_link *link, uint32_t *residency);
-enum dc_status link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
-enum dc_status link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
-void link_blank_all_dp_displays(struct dc *dc);
-void link_blank_all_edp_displays(struct dc *dc);
-void link_blank_dp_stream(struct dc_link *link, bool hw_init);
-void link_resume(struct dc_link *link);
-void link_set_dpms_on(
- struct dc_state *state,
- struct pipe_ctx *pipe_ctx);
-void link_set_dpms_off(struct pipe_ctx *pipe_ctx);
-void link_dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode);
-void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
-bool link_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
-bool link_update_dsc_config(struct pipe_ctx *pipe_ctx);
-enum dc_status link_validate_mode_timing(
- const struct dc_stream_state *stream,
+
+ /*************************** DDC **************************************/
+ struct ddc_service *(*create_ddc_service)(
+ struct ddc_service_init_data *ddc_init_data);
+ void (*destroy_ddc_service)(struct ddc_service **ddc);
+ bool (*query_ddc_data)(
+ struct ddc_service *ddc,
+ uint32_t address,
+ uint8_t *write_buf,
+ uint32_t write_size,
+ uint8_t *read_buf,
+ uint32_t read_size);
+ int (*aux_transfer_raw)(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_return_code_type *operation_result);
+ bool (*aux_transfer_with_retries_no_mutex)(struct ddc_service *ddc,
+ struct aux_payload *payload);
+ bool (*is_in_aux_transaction_mode)(struct ddc_service *ddc);
+ uint32_t (*get_aux_defer_delay)(struct ddc_service *ddc);
+
+
+ /*************************** DP Capability ****************************/
+ bool (*dp_is_sink_present)(struct dc_link *link);
+ bool (*dp_is_fec_supported)(const struct dc_link *link);
+ bool (*dp_is_128b_132b_signal)(struct pipe_ctx *pipe_ctx);
+ bool (*dp_get_max_link_enc_cap)(const struct dc_link *link,
+ struct dc_link_settings *max_link_enc_cap);
+ const struct dc_link_settings *(*dp_get_verified_link_cap)(
+ const struct dc_link *link);
+ enum dp_link_encoding (*dp_get_encoding_format)(
+ const struct dc_link_settings *link_settings);
+ bool (*dp_should_enable_fec)(const struct dc_link *link);
+ bool (*dp_decide_link_settings)(
+ struct dc_stream_state *stream,
+ struct dc_link_settings *link_setting);
+ enum dp_link_encoding (*mst_decide_link_encoding_format)(
+ const struct dc_link *link);
+ bool (*edp_decide_link_settings)(struct dc_link *link,
+ struct dc_link_settings *link_setting, uint32_t req_bw);
+ uint32_t (*bw_kbps_from_raw_frl_link_rate_data)(uint8_t bw);
+ bool (*dp_overwrite_extended_receiver_cap)(struct dc_link *link);
+ enum lttpr_mode (*dp_decide_lttpr_mode)(struct dc_link *link,
+ struct dc_link_settings *link_setting);
+
+
+ /*************************** DP DPIA/PHY ******************************/
+ int (*dpia_handle_usb4_bandwidth_allocation_for_link)(
+ struct dc_link *link, int peak_bw);
+ void (*dpia_handle_bw_alloc_response)(
+ struct dc_link *link, uint8_t bw, uint8_t result);
+ void (*dp_set_drive_settings)(
struct dc_link *link,
- const struct dc_crtc_timing *timing);
-bool link_detect(struct dc_link *link, enum dc_detect_reason reason);
-bool link_detect_connection_type(struct dc_link *link,
- enum dc_connection_type *type);
-const struct dc_link_status *link_get_status(const struct dc_link *link);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
-/* return true if the connected receiver supports the hdcp version */
-bool link_is_hdcp14(struct dc_link *link, enum signal_type signal);
-bool link_is_hdcp22(struct dc_link *link, enum signal_type signal);
-#endif
-void link_clear_dprx_states(struct dc_link *link);
-bool link_reset_cur_dp_mst_topology(struct dc_link *link);
-uint32_t dp_link_bandwidth_kbps(
- const struct dc_link *link,
- const struct dc_link_settings *link_settings);
-uint32_t link_timing_bandwidth_kbps(const struct dc_crtc_timing *timing);
-void link_get_cur_res_map(const struct dc *dc, uint32_t *map);
-void link_restore_res_map(const struct dc *dc, uint32_t *map);
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings);
+ void (*dpcd_write_rx_power_ctrl)(struct dc_link *link, bool on);
+
+ /*************************** DP IRQ Handler ***************************/
+ bool (*dp_parse_link_loss_status)(
+ struct dc_link *link,
+ union hpd_irq_data *hpd_irq_dpcd_data);
+ bool (*dp_should_allow_hpd_rx_irq)(const struct dc_link *link);
+ void (*dp_handle_link_loss)(struct dc_link *link);
+ enum dc_status (*dp_read_hpd_rx_irq_data)(
+ struct dc_link *link,
+ union hpd_irq_data *irq_data);
+ bool (*dp_handle_hpd_rx_irq)(struct dc_link *link,
+ union hpd_irq_data *out_hpd_irq_dpcd_data,
+ bool *out_link_loss,
+ bool defer_handling, bool *has_left_work);
+
+
+ /*************************** eDP Panel Control ************************/
+ void (*edp_panel_backlight_power_on)(
+ struct dc_link *link, bool wait_for_hpd);
+ int (*edp_get_backlight_level)(const struct dc_link *link);
+ bool (*edp_get_backlight_level_nits)(struct dc_link *link,
+ uint32_t *backlight_millinits_avg,
+ uint32_t *backlight_millinits_peak);
+ bool (*edp_set_backlight_level)(const struct dc_link *link,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp);
+ bool (*edp_set_backlight_level_nits)(struct dc_link *link,
+ bool isHDR,
+ uint32_t backlight_millinits,
+ uint32_t transition_time_in_ms);
+ int (*edp_get_target_backlight_pwm)(const struct dc_link *link);
+ bool (*edp_get_psr_state)(
+ const struct dc_link *link, enum dc_psr_state *state);
+ bool (*edp_set_psr_allow_active)(
+ struct dc_link *link,
+ const bool *allow_active,
+ bool wait,
+ bool force_static,
+ const unsigned int *power_opts);
+ bool (*edp_setup_psr)(struct dc_link *link,
+ const struct dc_stream_state *stream,
+ struct psr_config *psr_config,
+ struct psr_context *psr_context);
+ bool (*edp_set_sink_vtotal_in_psr_active)(
+ const struct dc_link *link,
+ uint16_t psr_vtotal_idle,
+ uint16_t psr_vtotal_su);
+ void (*edp_get_psr_residency)(
+ const struct dc_link *link, uint32_t *residency);
+ bool (*edp_wait_for_t12)(struct dc_link *link);
+ bool (*edp_is_ilr_optimization_required)(struct dc_link *link,
+ struct dc_crtc_timing *crtc_timing);
+ bool (*edp_backlight_enable_aux)(struct dc_link *link, bool enable);
+ void (*edp_add_delay_for_T9)(struct dc_link *link);
+ bool (*edp_receiver_ready_T9)(struct dc_link *link);
+ bool (*edp_receiver_ready_T7)(struct dc_link *link);
+ bool (*edp_power_alpm_dpcd_enable)(struct dc_link *link, bool enable);
+
+
+ /*************************** DP CTS ************************************/
+ void (*dp_handle_automated_test)(struct dc_link *link);
+ bool (*dp_set_test_pattern)(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size);
+ void (*dp_set_preferred_link_settings)(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link);
+ void (*dp_set_preferred_training_settings)(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link_training_overrides *lt_overrides,
+ struct dc_link *link,
+ bool skip_immediate_retrain);
+
+
+ /*************************** DP Trace *********************************/
+ bool (*dp_trace_is_initialized)(struct dc_link *link);
+ void (*dp_trace_set_is_logged_flag)(struct dc_link *link,
+ bool in_detection,
+ bool is_logged);
+ bool (*dp_trace_is_logged)(struct dc_link *link, bool in_detection);
+ unsigned long long (*dp_trace_get_lt_end_timestamp)(
+ struct dc_link *link, bool in_detection);
+ const struct dp_trace_lt_counts *(*dp_trace_get_lt_counts)(
+ struct dc_link *link, bool in_detection);
+ unsigned int (*dp_trace_get_link_loss_count)(struct dc_link *link);
+ void (*dp_trace_set_edp_power_timestamp)(struct dc_link *link,
+ bool power_up);
+ uint64_t (*dp_trace_get_edp_poweron_timestamp)(struct dc_link *link);
+ uint64_t (*dp_trace_get_edp_poweroff_timestamp)(struct dc_link *link);
+ void (*dp_trace_source_sequence)(
+ struct dc_link *link, uint8_t dp_test_mode);
+};
#endif /* __DC_LINK_HPD_H__ */
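A minimal sketch of what the refactor above implies for callers: dc-side code no longer calls link_* functions directly but reaches them through the link_service function pointer table. The wrapper below is hypothetical; dc->link_srv, link_create_link_service(), detect_link and get_status are taken from the hunks above, while the assumption that dc holds exactly one link_service instance created at dc construction is inferred, not stated in the patch.

/* Illustrative sketch only, not part of the patch. */
static bool example_detect_and_log(struct dc *dc, struct dc_link *link,
		enum dc_detect_reason reason)
{
	const struct dc_link_status *status;

	/* dc->link_srv is assumed to be created once via
	 * link_create_link_service() during dc construction.
	 */
	if (!dc->link_srv)
		return false;

	/* replaces the old direct call to link_detect(link, reason) */
	if (!dc->link_srv->detect_link(link, reason))
		return false;

	status = dc->link_srv->get_status(link);
	return status != NULL;
}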
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index fa6da93caa88..eaeb684c8a48 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -201,7 +201,7 @@ bool get_temp_dp_link_res(struct dc_link *link,
struct link_resource *link_res,
struct dc_link_settings *link_settings);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt(
const struct resource_context *res_ctx,
const struct resource_pool *pool,
diff --git a/drivers/gpu/drm/amd/display/dc/link/Makefile b/drivers/gpu/drm/amd/display/dc/link/Makefile
index 40352d8d7648..a52b56e2859e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/link/Makefile
@@ -55,7 +55,7 @@ LINK_PROTOCOLS = link_hpd.o link_ddc.o link_dpcd.o link_dp_dpia.o \
link_dp_training.o link_dp_training_8b_10b.o link_dp_training_128b_132b.o \
link_dp_training_dpia.o link_dp_training_auxless.o \
link_dp_training_fixed_vs_pe_retimer.o link_dp_phy.o link_dp_capability.o \
-link_edp_panel_control.o link_dp_irq_handler.o
+link_edp_panel_control.o link_dp_irq_handler.o link_dp_dpia_bw.o
AMD_DAL_LINK_PROTOCOLS = $(addprefix $(AMDDALPATH)/dc/link/protocols/, \
$(LINK_PROTOCOLS))
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 942300e0bd92..db9f1baa27e5 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -28,6 +28,7 @@
#include "link/protocols/link_dp_training.h"
#include "link/protocols/link_dp_phy.h"
#include "link/protocols/link_dp_training_fixed_vs_pe_retimer.h"
+#include "link/protocols/link_dp_capability.h"
#include "link/link_dpms.h"
#include "resource.h"
#include "dm_helpers.h"
@@ -75,7 +76,7 @@ static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern)
return false;
}
-void dp_retrain_link_dp_test(struct dc_link *link,
+static void dp_retrain_link_dp_test(struct dc_link *link,
struct dc_link_settings *link_setting,
bool skip_video_pattern)
{
@@ -250,7 +251,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
/* prepare link training settings */
link_training_settings.link_settings = link->cur_link_settings;
- link_training_settings.lttpr_mode = dc_link_decide_lttpr_mode(link, &link->cur_link_settings);
+ link_training_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link->cur_link_settings);
if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
link_training_settings.lttpr_mode == LTTPR_MODE_TRANSPARENT)
@@ -408,7 +409,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
* all the time. Do not touch it.
* forward request to DS
*/
- dc_link_dp_set_test_pattern(
+ dp_set_test_pattern(
link,
test_pattern,
DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED,
@@ -585,7 +586,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
}
}
-void dc_link_dp_handle_automated_test(struct dc_link *link)
+void dp_handle_automated_test(struct dc_link *link)
{
union test_request test_request;
union test_response test_response;
@@ -651,7 +652,7 @@ void dc_link_dp_handle_automated_test(struct dc_link *link)
sizeof(test_response));
}
-bool dc_link_dp_set_test_pattern(
+bool dp_set_test_pattern(
struct dc_link *link,
enum dp_test_pattern test_pattern,
enum dp_test_pattern_color_space test_pattern_color_space,
@@ -941,28 +942,9 @@ bool dc_link_dp_set_test_pattern(
return true;
}
-void dc_link_set_drive_settings(struct dc *dc,
- struct link_training_settings *lt_settings,
- const struct dc_link *link)
-{
-
- int i;
- struct link_resource link_res;
-
- for (i = 0; i < dc->link_count; i++)
- if (dc->links[i] == link)
- break;
-
- if (i >= dc->link_count)
- ASSERT_CRITICAL(false);
-
- link_get_cur_link_res(link, &link_res);
- dp_set_drive_settings(dc->links[i], &link_res, lt_settings);
-}
-
-void dc_link_set_preferred_link_settings(struct dc *dc,
- struct dc_link_settings *link_setting,
- struct dc_link *link)
+void dp_set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link)
{
int i;
struct pipe_ctx *pipe;
@@ -1001,11 +983,11 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
dp_retrain_link_dp_test(link, &store_settings, false);
}
-void dc_link_set_preferred_training_settings(struct dc *dc,
- struct dc_link_settings *link_setting,
- struct dc_link_training_overrides *lt_overrides,
- struct dc_link *link,
- bool skip_immediate_retrain)
+void dp_set_preferred_training_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link_training_overrides *lt_overrides,
+ struct dc_link *link,
+ bool skip_immediate_retrain)
{
if (lt_overrides != NULL)
link->preferred_training_settings = *lt_overrides;
@@ -1025,22 +1007,5 @@ void dc_link_set_preferred_training_settings(struct dc *dc,
/* Retrain now, or wait until next stream update to apply */
if (skip_immediate_retrain == false)
- dc_link_set_preferred_link_settings(dc, &link->preferred_link_setting, link);
-}
-
-void dc_link_set_test_pattern(struct dc_link *link,
- enum dp_test_pattern test_pattern,
- enum dp_test_pattern_color_space test_pattern_color_space,
- const struct link_training_settings *p_link_settings,
- const unsigned char *p_custom_pattern,
- unsigned int cust_pattern_size)
-{
- if (link != NULL)
- dc_link_dp_set_test_pattern(
- link,
- test_pattern,
- test_pattern_color_space,
- p_link_settings,
- p_custom_pattern,
- cust_pattern_size);
+ dp_set_preferred_link_settings(dc, &link->preferred_link_setting, link);
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
index 7f17838b653b..eae23ea7f6ec 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
@@ -25,9 +25,20 @@
#ifndef __LINK_DP_CTS_H__
#define __LINK_DP_CTS_H__
#include "link.h"
-
-void dp_retrain_link_dp_test(struct dc_link *link,
+void dp_handle_automated_test(struct dc_link *link);
+bool dp_set_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size);
+void dp_set_preferred_link_settings(struct dc *dc,
struct dc_link_settings *link_setting,
- bool skip_video_pattern);
-
+ struct dc_link *link);
+void dp_set_preferred_training_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link_training_overrides *lt_overrides,
+ struct dc_link *link,
+ bool skip_immediate_retrain);
#endif /* __LINK_DP_CTS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c
index 459b362ed374..fbcd8fb58ea8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c
@@ -37,7 +37,7 @@ void dp_trace_reset(struct dc_link *link)
memset(&link->dp_trace, 0, sizeof(link->dp_trace));
}
-bool dc_dp_trace_is_initialized(struct dc_link *link)
+bool dp_trace_is_initialized(struct dc_link *link)
{
return link->dp_trace.is_initialized;
}
@@ -76,7 +76,7 @@ void dp_trace_lt_total_count_increment(struct dc_link *link,
link->dp_trace.commit_lt_trace.counts.total++;
}
-void dc_dp_trace_set_is_logged_flag(struct dc_link *link,
+void dp_trace_set_is_logged_flag(struct dc_link *link,
bool in_detection,
bool is_logged)
{
@@ -86,8 +86,7 @@ void dc_dp_trace_set_is_logged_flag(struct dc_link *link,
link->dp_trace.commit_lt_trace.is_logged = is_logged;
}
-bool dc_dp_trace_is_logged(struct dc_link *link,
- bool in_detection)
+bool dp_trace_is_logged(struct dc_link *link, bool in_detection)
{
if (in_detection)
return link->dp_trace.detect_lt_trace.is_logged;
@@ -123,7 +122,7 @@ void dp_trace_set_lt_end_timestamp(struct dc_link *link,
link->dp_trace.commit_lt_trace.timestamps.end = dm_get_timestamp(link->dc->ctx);
}
-unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link,
+unsigned long long dp_trace_get_lt_end_timestamp(struct dc_link *link,
bool in_detection)
{
if (in_detection)
@@ -132,7 +131,7 @@ unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link,
return link->dp_trace.commit_lt_trace.timestamps.end;
}
-struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
+const struct dp_trace_lt_counts *dp_trace_get_lt_counts(struct dc_link *link,
bool in_detection)
{
if (in_detection)
@@ -141,12 +140,12 @@ struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
return &link->dp_trace.commit_lt_trace.counts;
}
-unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link)
+unsigned int dp_trace_get_link_loss_count(struct dc_link *link)
{
return link->dp_trace.link_loss_count;
}
-void link_dp_trace_set_edp_power_timestamp(struct dc_link *link,
+void dp_trace_set_edp_power_timestamp(struct dc_link *link,
bool power_up)
{
if (!power_up)
@@ -156,17 +155,17 @@ void link_dp_trace_set_edp_power_timestamp(struct dc_link *link,
link->dp_trace.edp_trace_power_timestamps.poweron = dm_get_timestamp(link->dc->ctx);
}
-uint64_t link_dp_trace_get_edp_poweron_timestamp(struct dc_link *link)
+uint64_t dp_trace_get_edp_poweron_timestamp(struct dc_link *link)
{
return link->dp_trace.edp_trace_power_timestamps.poweron;
}
-uint64_t link_dp_trace_get_edp_poweroff_timestamp(struct dc_link *link)
+uint64_t dp_trace_get_edp_poweroff_timestamp(struct dc_link *link)
{
return link->dp_trace.edp_trace_power_timestamps.poweroff;
}
-void link_dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode)
+void dp_trace_source_sequence(struct dc_link *link, uint8_t dp_test_mode)
{
if (link != NULL && link->dc->debug.enable_driver_sequence_debug)
core_link_write_dpcd(link, DP_SOURCE_SEQUENCE,
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
index 89feea1b2692..ab437a0c9101 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
@@ -28,7 +28,7 @@
void dp_trace_init(struct dc_link *link);
void dp_trace_reset(struct dc_link *link);
-bool dc_dp_trace_is_initialized(struct dc_link *link);
+bool dp_trace_is_initialized(struct dc_link *link);
void dp_trace_detect_lt_init(struct dc_link *link);
void dp_trace_commit_lt_init(struct dc_link *link);
void dp_trace_link_loss_increment(struct dc_link *link);
@@ -37,10 +37,10 @@ void dp_trace_lt_fail_count_update(struct dc_link *link,
bool in_detection);
void dp_trace_lt_total_count_increment(struct dc_link *link,
bool in_detection);
-void dc_dp_trace_set_is_logged_flag(struct dc_link *link,
+void dp_trace_set_is_logged_flag(struct dc_link *link,
bool in_detection,
bool is_logged);
-bool dc_dp_trace_is_logged(struct dc_link *link,
+bool dp_trace_is_logged(struct dc_link *link,
bool in_detection);
void dp_trace_lt_result_update(struct dc_link *link,
enum link_training_result result,
@@ -49,10 +49,15 @@ void dp_trace_set_lt_start_timestamp(struct dc_link *link,
bool in_detection);
void dp_trace_set_lt_end_timestamp(struct dc_link *link,
bool in_detection);
-unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link,
+unsigned long long dp_trace_get_lt_end_timestamp(struct dc_link *link,
bool in_detection);
-struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
+const struct dp_trace_lt_counts *dp_trace_get_lt_counts(struct dc_link *link,
bool in_detection);
-unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link);
+unsigned int dp_trace_get_link_loss_count(struct dc_link *link);
+void dp_trace_set_edp_power_timestamp(struct dc_link *link,
+ bool power_up);
+uint64_t dp_trace_get_edp_poweron_timestamp(struct dc_link *link);
+uint64_t dp_trace_get_edp_poweroff_timestamp(struct dc_link *link);
+void dp_trace_source_sequence(struct dc_link *link, uint8_t dp_test_mode);
#endif /* __LINK_DP_TRACE_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
index b092b00b3599..bebf9c4c8702 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
@@ -44,7 +44,7 @@ void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
link_enc->funcs->connect_dig_be_to_fe(link_enc,
pipe_ctx->stream_res.stream_enc->id, true);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- link_dp_source_sequence_trace(pipe_ctx->stream->link,
+ pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
if (stream_enc->funcs->enable_fifo)
stream_enc->funcs->enable_fifo(stream_enc);
@@ -63,7 +63,8 @@ void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc->id,
false);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- link_dp_source_sequence_trace(pipe_ctx->stream->link,
+ pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(
+ pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE);
}
@@ -105,7 +106,8 @@ void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx)
&stream->timing);
if (dc_is_dp_signal(stream->signal))
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
}
void enable_dio_dp_link_output(struct dc_link *link,
@@ -126,7 +128,8 @@ void enable_dio_dp_link_output(struct dc_link *link,
link_enc,
link_settings,
clock_source);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
}
void disable_dio_link_output(struct dc_link *link,
@@ -136,7 +139,8 @@ void disable_dio_link_output(struct dc_link *link,
struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
link_enc->funcs->disable_output(link_enc, signal);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}
void set_dio_dp_link_test_pattern(struct dc_link *link,
@@ -146,7 +150,7 @@ void set_dio_dp_link_test_pattern(struct dc_link *link,
struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
}
void set_dio_dp_lane_settings(struct dc_link *link,
@@ -195,7 +199,8 @@ void enable_dio_audio_packet(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc, false);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- link_dp_source_sequence_trace(pipe_ctx->stream->link,
+ pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(
+ pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_ENABLE_AUDIO_STREAM);
}
@@ -214,7 +219,8 @@ void disable_dio_audio_packet(struct pipe_ctx *pipe_ctx)
}
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- link_dp_source_sequence_trace(pipe_ctx->stream->link,
+ pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(
+ pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_DISABLE_AUDIO_STREAM);
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
index aa1c5e253b43..edd7d026a762 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
@@ -68,7 +68,8 @@ static void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx,
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
struct fixed31_32 h_blank_in_ms, time_slot_in_ms, mtp_cnt_per_h_blank;
uint32_t link_bw_in_kbps =
- dc_link_bandwidth_kbps(pipe_ctx->stream->link, link_settings);
+ hpo_dp_stream_encoder->ctx->dc->link_srv->dp_link_bandwidth_kbps(
+ pipe_ctx->stream->link, link_settings);
uint16_t hblank_min_symbol_width = 0;
if (link_bw_in_kbps > 0) {
@@ -115,7 +116,8 @@ static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)
stream->use_vsc_sdp_for_colorimetry,
stream->timing.flags.DSC,
false);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
}
static void enable_hpo_dp_fpga_link_output(struct dc_link *link,
@@ -201,7 +203,7 @@ static void set_hpo_dp_link_test_pattern(struct dc_link *link,
{
link_res->hpo_dp_link_enc->funcs->set_link_test_pattern(
link_res->hpo_dp_link_enc, tp_params);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
}
static void set_hpo_dp_lane_settings(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index 38216c789d77..fee71ebdfc73 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -466,7 +466,6 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin
link->local_sink = prev_sink;
}
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
{
struct hdcp_protection_message msg22;
@@ -508,7 +507,6 @@ static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
}
}
-#endif // CONFIG_DRM_AMD_DC_HDCP
static void read_current_link_settings_on_detect(struct dc_link *link)
{
union lane_count_set lane_count_set = {0};
@@ -855,6 +853,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
struct dc_sink *prev_sink = NULL;
struct dpcd_caps prev_dpcd_caps;
enum dc_connection_type new_connection_type = dc_connection_none;
+ enum dc_connection_type pre_connection_type = link->type;
const uint32_t post_oui_delay = 30; // 30ms
DC_LOGGER_INIT(link->ctx->logger);
@@ -878,7 +877,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
return true;
}
- if (!dc_link_detect_connection_type(link, &new_connection_type)) {
+ if (!link_detect_connection_type(link, &new_connection_type)) {
BREAK_TO_DEBUGGER();
return false;
}
@@ -957,6 +956,8 @@ static bool detect_link_and_local_sink(struct dc_link *link,
}
if (!detect_dp(link, &sink_caps, reason)) {
+ link->type = pre_connection_type;
+
if (prev_sink)
dc_sink_release(prev_sink);
return false;
@@ -1084,9 +1085,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
* TODO debug why certain monitors don't like
* two link trainings
*/
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
query_hdcp_capability(sink->sink_signal, link);
-#endif
} else {
// If edid is the same, then discard new sink and revert back to original sink
if (same_edid) {
@@ -1094,9 +1093,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
sink = prev_sink;
prev_sink = NULL;
}
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
query_hdcp_capability(sink->sink_signal, link);
-#endif
}
/* HDMI-DVI Dongle */
@@ -1162,9 +1159,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
/* From Connected-to-Disconnected. */
link->type = dc_connection_none;
sink_caps.signal = SIGNAL_TYPE_NONE;
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
memset(&link->hdcp_caps, 0, sizeof(struct hdcp_caps));
-#endif
/* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk
* is not cleared. If we emulate a DP signal on this connection, it thinks
* the dongle is still there and limits the number of modes we can emulate.
@@ -1189,7 +1184,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
}
/**
- * dc_link_detect_connection_type() - Determine if there is a sink connected
+ * link_detect_connection_type() - Determine if there is a sink connected
*
* @type: Returned connection type
* Does not detect downstream devices, such as MST sinks
@@ -1213,7 +1208,7 @@ bool link_detect_connection_type(struct dc_link *link, enum dc_connection_type *
/* Link may not have physical HPD pin. */
if (link->ep_type != DISPLAY_ENDPOINT_PHY) {
- if (link->is_hpd_pending || !dc_link_dpia_query_hpd_status(link))
+ if (link->is_hpd_pending || !dpia_query_hpd_status(link))
*type = dc_connection_none;
else
*type = dc_connection_single;
@@ -1244,11 +1239,16 @@ bool link_detect(struct dc_link *link, enum dc_detect_reason reason)
bool is_delegated_to_mst_top_mgr = false;
enum dc_connection_type pre_link_type = link->type;
+ DC_LOGGER_INIT(link->ctx->logger);
+
is_local_sink_detect_success = detect_link_and_local_sink(link, reason);
if (is_local_sink_detect_success && link->local_sink)
verify_link_capability(link, link->local_sink, reason);
+ DC_LOG_DC("%s: link_index=%d is_local_sink_detect_success=%d pre_link_type=%d link_type=%d\n", __func__,
+ link->link_index, is_local_sink_detect_success, pre_link_type, link->type);
+
if (is_local_sink_detect_success && link->local_sink &&
dc_is_dp_signal(link->local_sink->sink_signal) &&
link->dpcd_caps.is_mst_capable)
@@ -1266,7 +1266,6 @@ void link_clear_dprx_states(struct dc_link *link)
{
memset(&link->dprx_states, 0, sizeof(link->dprx_states));
}
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
bool link_is_hdcp14(struct dc_link *link, enum signal_type signal)
{
@@ -1314,10 +1313,108 @@ bool link_is_hdcp22(struct dc_link *link, enum signal_type signal)
return ret;
}
-#endif // CONFIG_DRM_AMD_DC_HDCP
const struct dc_link_status *link_get_status(const struct dc_link *link)
{
return &link->link_status;
}
+
+static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
+{
+ if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ dc_sink_retain(sink);
+
+ dc_link->remote_sinks[dc_link->sink_count] = sink;
+ dc_link->sink_count++;
+
+ return true;
+}
+
+struct dc_sink *link_add_remote_sink(
+ struct dc_link *link,
+ const uint8_t *edid,
+ int len,
+ struct dc_sink_init_data *init_data)
+{
+ struct dc_sink *dc_sink;
+ enum dc_edid_status edid_status;
+
+ if (len > DC_MAX_EDID_BUFFER_SIZE) {
+ dm_error("Max EDID buffer size breached!\n");
+ return NULL;
+ }
+
+ if (!init_data) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ if (!init_data->link) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dc_sink = dc_sink_create(init_data);
+
+ if (!dc_sink)
+ return NULL;
+
+ memmove(dc_sink->dc_edid.raw_edid, edid, len);
+ dc_sink->dc_edid.length = len;
+
+ if (!link_add_remote_sink_helper(
+ link,
+ dc_sink))
+ goto fail_add_sink;
+
+ edid_status = dm_helpers_parse_edid_caps(
+ link,
+ &dc_sink->dc_edid,
+ &dc_sink->edid_caps);
+
+ /*
+ * Treat device as no EDID device if EDID
+ * parsing fails
+ */
+ if (edid_status != EDID_OK && edid_status != EDID_PARTIAL_VALID) {
+ dc_sink->dc_edid.length = 0;
+ dm_error("Bad EDID, status%d!\n", edid_status);
+ }
+
+ return dc_sink;
+
+fail_add_sink:
+ dc_sink_release(dc_sink);
+ return NULL;
+}
+
+void link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
+{
+ int i;
+
+ if (!link->sink_count) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ for (i = 0; i < link->sink_count; i++) {
+ if (link->remote_sinks[i] == sink) {
+ dc_sink_release(sink);
+ link->remote_sinks[i] = NULL;
+
+ /* shrink array to remove empty place */
+ while (i < link->sink_count - 1) {
+ link->remote_sinks[i] = link->remote_sinks[i+1];
+ i++;
+ }
+ link->remote_sinks[i] = NULL;
+ link->sink_count--;
+ return;
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.h b/drivers/gpu/drm/amd/display/dc/link/link_detection.h
index 1831636516fb..7da05078721e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.h
@@ -26,5 +26,18 @@
#ifndef __DC_LINK_DETECTION_H__
#define __DC_LINK_DETECTION_H__
#include "link.h"
-
+bool link_detect(struct dc_link *link, enum dc_detect_reason reason);
+bool link_detect_connection_type(struct dc_link *link,
+ enum dc_connection_type *type);
+struct dc_sink *link_add_remote_sink(
+ struct dc_link *link,
+ const uint8_t *edid,
+ int len,
+ struct dc_sink_init_data *init_data);
+void link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink);
+bool link_reset_cur_dp_mst_topology(struct dc_link *link);
+const struct dc_link_status *link_get_status(const struct dc_link *link);
+bool link_is_hdcp14(struct dc_link *link, enum signal_type signal);
+bool link_is_hdcp22(struct dc_link *link, enum signal_type signal);
+void link_clear_dprx_states(struct dc_link *link);
#endif /* __DC_LINK_DETECTION_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 257e1c3ba00a..020d668ce09e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -37,6 +37,7 @@
#include "link_dpms.h"
#include "link_hwss.h"
+#include "link_validation.h"
#include "accessories/link_fpga.h"
#include "accessories/link_dp_trace.h"
#include "protocols/link_dpcd.h"
@@ -46,6 +47,7 @@
#include "protocols/link_dp_capability.h"
#include "protocols/link_dp_training.h"
#include "protocols/link_edp_panel_control.h"
+#include "protocols/link_dp_dpia_bw.h"
#include "dm_helpers.h"
#include "link_enc_cfg.h"
@@ -136,7 +138,7 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init)
}
if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)
- dc_link_dp_receiver_power_ctrl(link, false);
+ dpcd_write_rx_power_ctrl(link, false);
}
}
@@ -646,7 +648,6 @@ static void write_i2c_redriver_setting(
if (!i2c_success)
DC_LOG_DEBUG("Set redriver failed");
}
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
{
@@ -672,7 +673,7 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
/* stream encoder index */
config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
- if (link_is_dp_128b_132b_signal(pipe_ctx))
+ if (dp_is_128b_132b_signal(pipe_ctx))
config.stream_enc_idx =
pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;
@@ -681,7 +682,7 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
/* link encoder index */
config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
- if (link_is_dp_128b_132b_signal(pipe_ctx))
+ if (dp_is_128b_132b_signal(pipe_ctx))
config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst;
/* dio output index is dpia index for DPIA endpoint & dcio index by default */
@@ -702,7 +703,7 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0;
config.mst_enabled = (pipe_ctx->stream->signal ==
SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0;
- config.dp2_enabled = link_is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0;
+ config.dp2_enabled = dp_is_128b_132b_signal(pipe_ctx) ? 1 : 0;
config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ?
1 : 0;
config.dpms_off = dpms_off;
@@ -712,7 +713,6 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
}
-#endif
static void set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
{
@@ -817,7 +817,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
/* Enable DSC in encoder */
if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)
- && !link_is_dp_128b_132b_signal(pipe_ctx)) {
+ && !dp_is_128b_132b_signal(pipe_ctx)) {
DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id);
dsc_optc_config_log(dsc, &dsc_optc_cfg);
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc,
@@ -843,7 +843,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
/* disable DSC in stream encoder */
if (dc_is_dp_signal(stream->signal)) {
- if (link_is_dp_128b_132b_signal(pipe_ctx))
+ if (dp_is_128b_132b_signal(pipe_ctx))
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
pipe_ctx->stream_res.hpo_dp_stream_enc,
false,
@@ -902,7 +902,7 @@ bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immedi
memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps));
if (dc_is_dp_signal(stream->signal)) {
DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id);
- if (link_is_dp_128b_132b_signal(pipe_ctx))
+ if (dp_is_128b_132b_signal(pipe_ctx))
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
pipe_ctx->stream_res.hpo_dp_stream_enc,
true,
@@ -919,7 +919,7 @@ bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immedi
/* disable DSC PPS in stream encoder */
memset(&stream->dsc_packed_pps[0], 0, sizeof(stream->dsc_packed_pps));
if (dc_is_dp_signal(stream->signal)) {
- if (link_is_dp_128b_132b_signal(pipe_ctx))
+ if (dp_is_128b_132b_signal(pipe_ctx))
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
pipe_ctx->stream_res.hpo_dp_stream_enc,
false,
@@ -1001,7 +1001,7 @@ static void enable_stream_features(struct pipe_ctx *pipe_ctx)
}
}
-static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp)
+static void log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp)
{
const uint32_t VCP_Y_PRECISION = 1000;
uint64_t vcp_x, vcp_y;
@@ -1044,7 +1044,7 @@ static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_tim
static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
{
struct fixed31_32 mbytes_per_sec;
- uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link,
+ uint32_t link_rate_in_mbytes_per_sec = dp_link_bandwidth_kbps(stream->link,
&stream->link->cur_link_settings);
link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */
@@ -1153,7 +1153,7 @@ static bool poll_for_allocation_change_trigger(struct dc_link *link)
break;
}
- msleep(5);
+ fsleep(5000);
}
if (result == ACT_FAILED) {
@@ -1517,7 +1517,7 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
pbn = get_pbn_from_timing(pipe_ctx);
avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
- dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
+ log_vcp_x_y(link, avg_time_slots_per_mtp);
if (link_hwss->ext.set_throttled_vcp_size)
link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
@@ -1535,7 +1535,7 @@ struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp(
{
struct fixed31_32 link_bw_effective =
dc_fixpt_from_int(
- dc_link_bandwidth_kbps(link, &link->cur_link_settings));
+ dp_link_bandwidth_kbps(link, &link->cur_link_settings));
struct fixed31_32 timeslot_bw_effective =
dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT);
struct fixed31_32 timing_bw =
@@ -1640,7 +1640,7 @@ static bool write_128b_132b_sst_payload_allocation_table(
}
}
retries++;
- msleep(5);
+ fsleep(5000);
}
if (!result && retries == max_retries) {
@@ -1670,7 +1670,7 @@ static enum dc_status update_sst_payload(struct pipe_ctx *pipe_ctx,
if (!allocate) {
avg_time_slots_per_mtp = dc_fixpt_from_int(0);
- dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
+ log_vcp_x_y(link, avg_time_slots_per_mtp);
if (link_hwss->ext.set_throttled_vcp_size)
link_hwss->ext.set_throttled_vcp_size(pipe_ctx,
@@ -1721,7 +1721,7 @@ static enum dc_status update_sst_payload(struct pipe_ctx *pipe_ctx,
DP_128b_132b_ENCODING) {
avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(stream, link);
- dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
+ log_vcp_x_y(link, avg_time_slots_per_mtp);
if (link_hwss->ext.set_throttled_vcp_size)
link_hwss->ext.set_throttled_vcp_size(pipe_ctx,
@@ -2044,11 +2044,17 @@ static enum dc_status enable_link_dp(struct dc_state *state,
}
}
- /* Train with fallback when enabling DPIA link. Conventional links are
+ /*
+ * If the link is DP-over-USB4 do the following:
+ * - Train with fallback when enabling DPIA link. Conventional links are
* trained with fallback during sink detection.
+ * - Allocate only the bandwidth the stream needs, in Gbps. Inform the CM
+ * in case the stream needs more or less bandwidth than what has been
+ * allocated earlier at plug time.
*/
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
do_fallback = true;
+ }
/*
* Temporary w/a to get DP2.0 link rates to work with SST.
@@ -2117,7 +2123,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
set_default_brightness_aux(link); // TODO: use cached if known
if (link->dpcd_sink_ext_caps.bits.oled == 1)
msleep(bl_oled_enable_delay);
- link_backlight_enable_aux(link, true);
+ edp_backlight_enable_aux(link, true);
}
return status;
@@ -2237,7 +2243,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
ASSERT(is_master_pipe_for_link(link, pipe_ctx));
- if (link_is_dp_128b_132b_signal(pipe_ctx))
+ if (dp_is_128b_132b_signal(pipe_ctx))
vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
@@ -2262,15 +2268,13 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
dc->hwss.disable_audio_stream(pipe_ctx);
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, true);
-#endif
dc->hwss.blank_stream(pipe_ctx);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
- link_is_dp_128b_132b_signal(pipe_ctx))
+ dp_is_128b_132b_signal(pipe_ctx))
update_sst_payload(pipe_ctx, false);
if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
@@ -2299,7 +2303,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
}
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
- !link_is_dp_128b_132b_signal(pipe_ctx)) {
+ !dp_is_128b_132b_signal(pipe_ctx)) {
/* In DP1.x SST mode, our encoder will go to TPS1
* when link is on but stream is off.
@@ -2319,7 +2323,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
if (dc_is_dp_signal(pipe_ctx->stream->signal))
link_set_dsc_enable(pipe_ctx, false);
}
- if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (dp_is_128b_132b_signal(pipe_ctx)) {
if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO);
}
@@ -2343,7 +2347,7 @@ void link_set_dpms_on(
ASSERT(is_master_pipe_for_link(link, pipe_ctx));
- if (link_is_dp_128b_132b_signal(pipe_ctx))
+ if (dp_is_128b_132b_signal(pipe_ctx))
vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
@@ -2365,7 +2369,7 @@ void link_set_dpms_on(
ASSERT(link_enc);
if (!dc_is_virtual_signal(pipe_ctx->stream->signal)
- && !link_is_dp_128b_132b_signal(pipe_ctx)) {
+ && !dp_is_128b_132b_signal(pipe_ctx)) {
if (link_enc)
link_enc->funcs->setup(
link_enc,
@@ -2375,7 +2379,7 @@ void link_set_dpms_on(
pipe_ctx->stream->link->link_state_valid = true;
if (pipe_ctx->stream_res.tg->funcs->set_out_mux) {
- if (link_is_dp_128b_132b_signal(pipe_ctx))
+ if (dp_is_128b_132b_signal(pipe_ctx))
otg_out_dest = OUT_MUX_HPO_DP;
else
otg_out_dest = OUT_MUX_DIO;
@@ -2398,7 +2402,7 @@ void link_set_dpms_on(
dc->hwss.update_info_frame(pipe_ctx);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+ dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
/* Do not touch link on seamless boot optimization. */
if (pipe_ctx->stream->apply_seamless_boot_optimization) {
@@ -2410,9 +2414,7 @@ void link_set_dpms_on(
dc->hwss.enable_audio_stream(pipe_ctx);
}
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, false);
-#endif
return;
}
@@ -2422,9 +2424,7 @@ void link_set_dpms_on(
!pipe_ctx->stream->timing.flags.DSC &&
!pipe_ctx->next_odm_pipe) {
pipe_ctx->stream->dpms_off = false;
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, false);
-#endif
return;
}
@@ -2477,7 +2477,7 @@ void link_set_dpms_on(
* from transmitter control.
*/
if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) ||
- link_is_dp_128b_132b_signal(pipe_ctx)))
+ dp_is_128b_132b_signal(pipe_ctx)))
if (link_enc)
link_enc->funcs->setup(
link_enc,
@@ -2497,7 +2497,7 @@ void link_set_dpms_on(
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
allocate_mst_payload(pipe_ctx);
else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
- link_is_dp_128b_132b_signal(pipe_ctx))
+ dp_is_128b_132b_signal(pipe_ctx))
update_sst_payload(pipe_ctx, true);
dc->hwss.unblank_stream(pipe_ctx,
@@ -2508,14 +2508,12 @@ void link_set_dpms_on(
if (dc_is_dp_signal(pipe_ctx->stream->signal))
enable_stream_features(pipe_ctx);
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, false);
-#endif
dc->hwss.enable_audio_stream(pipe_ctx);
} else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- if (link_is_dp_128b_132b_signal(pipe_ctx))
+ if (dp_is_128b_132b_signal(pipe_ctx))
dp_fpga_hpo_enable_link_and_stream(state, pipe_ctx);
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
dc_is_virtual_signal(pipe_ctx->stream->signal))
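A minimal sketch of the DP-over-USB4 bandwidth negotiation described in the enable_link_dp() comment above, assuming the request is expressed in the units the connection manager expects (the comment says Gbps) and that dpia_handle_usb4_bandwidth_allocation_for_link() returns the bandwidth actually granted; the example_negotiate_dpia_bw() helper and the comparison against the request are hypothetical, while link_srv, ep_type and the DISPLAY_ENDPOINT_USB4_DPIA check come from the hunks above.

/* Illustrative sketch only, not part of the patch. */
static bool example_negotiate_dpia_bw(struct dc *dc, struct dc_link *link,
		int required_bw)
{
	int granted_bw;

	/* conventional (non-DPIA) links do not go through the CM flow */
	if (link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
		return true;

	/* ask the CM for only what the stream needs; it may grant less */
	granted_bw = dc->link_srv->dpia_handle_usb4_bandwidth_allocation_for_link(
			link, required_bw);

	return granted_bw >= required_bw;
}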
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
index 33d312dabdb8..9398f9c1666a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
@@ -27,14 +27,27 @@
#define __DC_LINK_DPMS_H__
#include "link.h"
-bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx,
- bool enable, bool immediate_update);
-struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp(
- const struct dc_stream_state *stream,
- const struct dc_link *link);
+void link_set_dpms_on(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx);
+void link_set_dpms_off(struct pipe_ctx *pipe_ctx);
+void link_resume(struct dc_link *link);
+void link_blank_all_dp_displays(struct dc *dc);
+void link_blank_all_edp_displays(struct dc *dc);
+void link_blank_dp_stream(struct dc_link *link, bool hw_init);
void link_set_all_streams_dpms_off_for_link(struct dc_link *link);
void link_get_master_pipes_with_dpms_on(const struct dc_link *link,
struct dc_state *state,
uint8_t *count,
struct pipe_ctx *pipes[MAX_PIPES]);
+enum dc_status link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
+enum dc_status link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
+bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx,
+ bool enable, bool immediate_update);
+struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp(
+ const struct dc_stream_state *stream,
+ const struct dc_link *link);
+void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
+bool link_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
+bool link_update_dsc_config(struct pipe_ctx *pipe_ctx);
#endif /* __DC_LINK_DPMS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index aeb26a4d539e..3951d48118c4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -27,7 +27,20 @@
* This file owns the creation/destruction of link structure.
*/
#include "link_factory.h"
+#include "link_detection.h"
+#include "link_resource.h"
+#include "link_validation.h"
+#include "link_dpms.h"
+#include "accessories/link_dp_cts.h"
+#include "accessories/link_dp_trace.h"
+#include "accessories/link_fpga.h"
#include "protocols/link_ddc.h"
+#include "protocols/link_dp_capability.h"
+#include "protocols/link_dp_dpia_bw.h"
+#include "protocols/link_dp_dpia.h"
+#include "protocols/link_dp_irq_handler.h"
+#include "protocols/link_dp_phy.h"
+#include "protocols/link_dp_training.h"
#include "protocols/link_edp_panel_control.h"
#include "protocols/link_hpd.h"
#include "gpio_service_interface.h"
@@ -39,7 +52,248 @@
DC_LOG_HW_HOTPLUG( \
__VA_ARGS__)
-static enum transmitter translate_encoder_to_transmitter(struct graphics_object_id encoder)
+/* link factory owns the creation/destruction of link structures. */
+static void construct_link_service_factory(struct link_service *link_srv)
+{
+
+ link_srv->create_link = link_create;
+ link_srv->destroy_link = link_destroy;
+}
+
+/* link_detection manages link detection states and receiver states by using
+ * various link protocols. It also provides helper functions to interpret
+ * certain capabilities or statuses based on the states it manages, or to
+ * retrieve them directly from connected receivers.
+ */
+static void construct_link_service_detection(struct link_service *link_srv)
+{
+ link_srv->detect_link = link_detect;
+ link_srv->detect_connection_type = link_detect_connection_type;
+ link_srv->add_remote_sink = link_add_remote_sink;
+ link_srv->remove_remote_sink = link_remove_remote_sink;
+ link_srv->get_hpd_state = link_get_hpd_state;
+ link_srv->get_hpd_gpio = link_get_hpd_gpio;
+ link_srv->enable_hpd = link_enable_hpd;
+ link_srv->disable_hpd = link_disable_hpd;
+ link_srv->enable_hpd_filter = link_enable_hpd_filter;
+ link_srv->reset_cur_dp_mst_topology = link_reset_cur_dp_mst_topology;
+ link_srv->get_status = link_get_status;
+ link_srv->is_hdcp1x_supported = link_is_hdcp14;
+ link_srv->is_hdcp2x_supported = link_is_hdcp22;
+ link_srv->clear_dprx_states = link_clear_dprx_states;
+}
+
+/* link resource implements accessors to link resource. */
+static void construct_link_service_resource(struct link_service *link_srv)
+{
+ link_srv->get_cur_res_map = link_get_cur_res_map;
+ link_srv->restore_res_map = link_restore_res_map;
+ link_srv->get_cur_link_res = link_get_cur_link_res;
+}
+
+/* link validation owns timing validation against various link limitations
+ * (e.g. link bandwidth, receiver capability or our hardware capability). It
+ * also provides helper functions exposing bandwidth formulas used in validation.
+ */
+static void construct_link_service_validation(struct link_service *link_srv)
+{
+ link_srv->validate_mode_timing = link_validate_mode_timing;
+ link_srv->dp_link_bandwidth_kbps = dp_link_bandwidth_kbps;
+}
+
+/* link dpms owns the programming sequence of the stream's dpms state
+ * associated with the link, and the link's enable/disable sequences as a
+ * result of the stream's dpms state change.
+ */
+static void construct_link_service_dpms(struct link_service *link_srv)
+{
+ link_srv->set_dpms_on = link_set_dpms_on;
+ link_srv->set_dpms_off = link_set_dpms_off;
+ link_srv->resume = link_resume;
+ link_srv->blank_all_dp_displays = link_blank_all_dp_displays;
+ link_srv->blank_all_edp_displays = link_blank_all_edp_displays;
+ link_srv->blank_dp_stream = link_blank_dp_stream;
+ link_srv->increase_mst_payload = link_increase_mst_payload;
+ link_srv->reduce_mst_payload = link_reduce_mst_payload;
+ link_srv->set_dsc_on_stream = link_set_dsc_on_stream;
+ link_srv->set_dsc_enable = link_set_dsc_enable;
+ link_srv->update_dsc_config = link_update_dsc_config;
+}
+
+/* link ddc implements generic display communication protocols such as i2c, aux
+ * and scdc. It should not contain any specific applications of these
+ * protocols, such as display capability queries, detection, or handshaking
+ * (e.g. link training).
+ */
+static void construct_link_service_ddc(struct link_service *link_srv)
+{
+ link_srv->create_ddc_service = link_create_ddc_service;
+ link_srv->destroy_ddc_service = link_destroy_ddc_service;
+ link_srv->query_ddc_data = link_query_ddc_data;
+ link_srv->aux_transfer_raw = link_aux_transfer_raw;
+ link_srv->aux_transfer_with_retries_no_mutex =
+ link_aux_transfer_with_retries_no_mutex;
+ link_srv->is_in_aux_transaction_mode = link_is_in_aux_transaction_mode;
+ link_srv->get_aux_defer_delay = link_get_aux_defer_delay;
+}
+
+/* link dp capability implements the dp-specific link capability retrieval
+ * sequence. It is responsible for retrieving, parsing, overriding and deciding
+ * the capability obtained from the dp link. Link capability consists of encoders,
+ * DPRXs, cables, retimers, usb and all other possible backend capabilities.
+ */
+static void construct_link_service_dp_capability(struct link_service *link_srv)
+{
+ link_srv->dp_is_sink_present = dp_is_sink_present;
+ link_srv->dp_is_fec_supported = dp_is_fec_supported;
+ link_srv->dp_is_128b_132b_signal = dp_is_128b_132b_signal;
+ link_srv->dp_get_max_link_enc_cap = dp_get_max_link_enc_cap;
+ link_srv->dp_get_verified_link_cap = dp_get_verified_link_cap;
+ link_srv->dp_get_encoding_format = link_dp_get_encoding_format;
+ link_srv->dp_should_enable_fec = dp_should_enable_fec;
+ link_srv->dp_decide_link_settings = link_decide_link_settings;
+ link_srv->mst_decide_link_encoding_format =
+ mst_decide_link_encoding_format;
+ link_srv->edp_decide_link_settings = edp_decide_link_settings;
+ link_srv->bw_kbps_from_raw_frl_link_rate_data =
+ link_bw_kbps_from_raw_frl_link_rate_data;
+ link_srv->dp_overwrite_extended_receiver_cap =
+ dp_overwrite_extended_receiver_cap;
+ link_srv->dp_decide_lttpr_mode = dp_decide_lttpr_mode;
+}
+
+/* link dp phy/dpia implements basic dp phy/dpia functionality such as
+ * enabling/disabling the output and setting lane/drive settings. It is
+ * responsible for maintaining and updating the software state representing
+ * the current phy/dpia status, such as current link settings.
+ */
+static void construct_link_service_dp_phy_or_dpia(struct link_service *link_srv)
+{
+ link_srv->dpia_handle_usb4_bandwidth_allocation_for_link =
+ dpia_handle_usb4_bandwidth_allocation_for_link;
+ link_srv->dpia_handle_bw_alloc_response = dpia_handle_bw_alloc_response;
+ link_srv->dp_set_drive_settings = dp_set_drive_settings;
+ link_srv->dpcd_write_rx_power_ctrl = dpcd_write_rx_power_ctrl;
+}
+
+/* link dp irq handler implements the DP HPD short pulse handling sequence
+ * according to the DP specifications.
+ */
+static void construct_link_service_dp_irq_handler(struct link_service *link_srv)
+{
+ link_srv->dp_parse_link_loss_status = dp_parse_link_loss_status;
+ link_srv->dp_should_allow_hpd_rx_irq = dp_should_allow_hpd_rx_irq;
+ link_srv->dp_handle_link_loss = dp_handle_link_loss;
+ link_srv->dp_read_hpd_rx_irq_data = dp_read_hpd_rx_irq_data;
+ link_srv->dp_handle_hpd_rx_irq = dp_handle_hpd_rx_irq;
+}
+
+/* link edp panel control implements retrieval and configuration of eDP panel
+ * features such as PSR and ABM, and it also manages the spec-defined eDP
+ * panel power sequences.
+ */
+static void construct_link_service_edp_panel_control(struct link_service *link_srv)
+{
+ link_srv->edp_panel_backlight_power_on = edp_panel_backlight_power_on;
+ link_srv->edp_get_backlight_level = edp_get_backlight_level;
+ link_srv->edp_get_backlight_level_nits = edp_get_backlight_level_nits;
+ link_srv->edp_set_backlight_level = edp_set_backlight_level;
+ link_srv->edp_set_backlight_level_nits = edp_set_backlight_level_nits;
+ link_srv->edp_get_target_backlight_pwm = edp_get_target_backlight_pwm;
+ link_srv->edp_get_psr_state = edp_get_psr_state;
+ link_srv->edp_set_psr_allow_active = edp_set_psr_allow_active;
+ link_srv->edp_setup_psr = edp_setup_psr;
+ link_srv->edp_set_sink_vtotal_in_psr_active =
+ edp_set_sink_vtotal_in_psr_active;
+ link_srv->edp_get_psr_residency = edp_get_psr_residency;
+ link_srv->edp_wait_for_t12 = edp_wait_for_t12;
+ link_srv->edp_is_ilr_optimization_required =
+ edp_is_ilr_optimization_required;
+ link_srv->edp_backlight_enable_aux = edp_backlight_enable_aux;
+ link_srv->edp_add_delay_for_T9 = edp_add_delay_for_T9;
+ link_srv->edp_receiver_ready_T9 = edp_receiver_ready_T9;
+ link_srv->edp_receiver_ready_T7 = edp_receiver_ready_T7;
+ link_srv->edp_power_alpm_dpcd_enable = edp_power_alpm_dpcd_enable;
+}
+
+/* link dp cts implements dp compliance test automation protocols and manual
+ * testing interfaces for debugging and certification purposes.
+ */
+static void construct_link_service_dp_cts(struct link_service *link_srv)
+{
+ link_srv->dp_handle_automated_test = dp_handle_automated_test;
+ link_srv->dp_set_test_pattern = dp_set_test_pattern;
+ link_srv->dp_set_preferred_link_settings =
+ dp_set_preferred_link_settings;
+ link_srv->dp_set_preferred_training_settings =
+ dp_set_preferred_training_settings;
+}
+
+/* link dp trace implements tracing interfaces for tracking major dp sequences,
+ * including execution status and timestamps.
+ */
+static void construct_link_service_dp_trace(struct link_service *link_srv)
+{
+ link_srv->dp_trace_is_initialized = dp_trace_is_initialized;
+ link_srv->dp_trace_set_is_logged_flag = dp_trace_set_is_logged_flag;
+ link_srv->dp_trace_is_logged = dp_trace_is_logged;
+ link_srv->dp_trace_get_lt_end_timestamp = dp_trace_get_lt_end_timestamp;
+ link_srv->dp_trace_get_lt_counts = dp_trace_get_lt_counts;
+ link_srv->dp_trace_get_link_loss_count = dp_trace_get_link_loss_count;
+ link_srv->dp_trace_set_edp_power_timestamp =
+ dp_trace_set_edp_power_timestamp;
+ link_srv->dp_trace_get_edp_poweron_timestamp =
+ dp_trace_get_edp_poweron_timestamp;
+ link_srv->dp_trace_get_edp_poweroff_timestamp =
+ dp_trace_get_edp_poweroff_timestamp;
+ link_srv->dp_trace_source_sequence = dp_trace_source_sequence;
+}
+
+static void construct_link_service(struct link_service *link_srv)
+{
+ /* All link service functions should fall under one of these sub-categories.
+ * If a new function doesn't clearly fall under an existing sub-category,
+ * either you are adding a whole new aspect of responsibility to the link
+ * service or the function doesn't belong in the link service at all. In
+ * that case please contact the arch owner to arrange a design review
+ * meeting.
+ */
+ construct_link_service_factory(link_srv);
+ construct_link_service_detection(link_srv);
+ construct_link_service_resource(link_srv);
+ construct_link_service_validation(link_srv);
+ construct_link_service_dpms(link_srv);
+ construct_link_service_ddc(link_srv);
+ construct_link_service_dp_capability(link_srv);
+ construct_link_service_dp_phy_or_dpia(link_srv);
+ construct_link_service_dp_irq_handler(link_srv);
+ construct_link_service_edp_panel_control(link_srv);
+ construct_link_service_dp_cts(link_srv);
+ construct_link_service_dp_trace(link_srv);
+}
+
+struct link_service *link_create_link_service(void)
+{
+ struct link_service *link_srv = kzalloc(sizeof(*link_srv), GFP_KERNEL);
+
+ if (link_srv == NULL)
+ goto fail;
+
+ construct_link_service(link_srv);
+
+ return link_srv;
+fail:
+ return NULL;
+}
+
+void link_destroy_link_service(struct link_service **link_srv)
+{
+ kfree(*link_srv);
+ *link_srv = NULL;
+}
+
+static enum transmitter translate_encoder_to_transmitter(
+ struct graphics_object_id encoder)
{
switch (encoder.id) {
case ENCODER_ID_INTERNAL_UNIPHY:
@@ -181,7 +435,7 @@ static enum channel_id get_ddc_line(struct dc_link *link)
return channel;
}
-static bool dc_link_construct_phy(struct dc_link *link,
+static bool construct_phy(struct dc_link *link,
const struct link_init_data *init_params)
{
uint8_t i;
@@ -274,14 +528,18 @@ static bool dc_link_construct_phy(struct dc_link *link,
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
switch (link->dc->config.allow_edp_hotplug_detection) {
- case 1: // only the 1st eDP handles hotplug
+ case HPD_EN_FOR_ALL_EDP:
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(link->hpd_gpio);
+ break;
+ case HPD_EN_FOR_PRIMARY_EDP_ONLY:
if (link->link_index == 0)
link->irq_source_hpd_rx =
dal_irq_get_rx_source(link->hpd_gpio);
else
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
break;
- case 2: // only the 2nd eDP handles hotplug
+ case HPD_EN_FOR_SECONDARY_EDP_ONLY:
if (link->link_index == 1)
link->irq_source_hpd_rx =
dal_irq_get_rx_source(link->hpd_gpio);
@@ -289,6 +547,7 @@ static bool dc_link_construct_phy(struct dc_link *link,
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
break;
default:
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
break;
}
}
@@ -473,7 +732,7 @@ create_fail:
return false;
}
-static bool dc_link_construct_dpia(struct dc_link *link,
+static bool construct_dpia(struct dc_link *link,
const struct link_init_data *init_params)
{
struct ddc_service_init_data ddc_service_init_data = { 0 };
@@ -543,9 +802,9 @@ static bool link_construct(struct dc_link *link,
{
/* Handle dpia case */
if (init_params->is_dpia_link == true)
- return dc_link_construct_dpia(link, init_params);
+ return construct_dpia(link, init_params);
else
- return dc_link_construct_phy(link, init_params);
+ return construct_phy(link, init_params);
}
struct dc_link *link_create(const struct link_init_data *init_params)
@@ -574,4 +833,3 @@ void link_destroy(struct dc_link **link)
kfree(*link);
*link = NULL;
}
-
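The construct_link_service_*() helpers above only fill in slots of a function pointer table; code outside the link component is expected to reach the link implementation through link_srv->... rather than by calling the functions directly. A rough standalone sketch of that construction pattern, with hypothetical demo_* names instead of the DC types:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, simplified stand-in for struct link_service: a table of
 * function pointers grouped by responsibility, filled in by per-category
 * "construct" helpers so each sub-module owns its own slice of the API. */
struct demo_service {
	int (*validate)(int req_kbps, int cap_kbps);
	void (*set_dpms_on)(int stream_id);
};

static int demo_validate(int req_kbps, int cap_kbps)
{
	return req_kbps <= cap_kbps;
}

static void demo_set_dpms_on(int stream_id)
{
	printf("stream %d: dpms on\n", stream_id);
}

static void construct_demo_validation(struct demo_service *srv)
{
	srv->validate = demo_validate;
}

static void construct_demo_dpms(struct demo_service *srv)
{
	srv->set_dpms_on = demo_set_dpms_on;
}

static struct demo_service *demo_create_service(void)
{
	struct demo_service *srv = calloc(1, sizeof(*srv));

	if (!srv)
		return NULL;
	construct_demo_validation(srv);
	construct_demo_dpms(srv);
	return srv;
}

int main(void)
{
	struct demo_service *srv = demo_create_service();

	if (srv && srv->validate(17280000, 17280000))
		srv->set_dpms_on(0);
	free(srv);
	return 0;
}

Splitting the table population per sub-category keeps each file responsible only for the pointers it implements, which is the design choice the comment block at the top of construct_link_service() spells out.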
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.h b/drivers/gpu/drm/amd/display/dc/link/link_factory.h
index 5b846147c4a6..e96220d48d03 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.h
@@ -25,5 +25,7 @@
#ifndef __LINK_FACTORY_H__
#define __LINK_FACTORY_H__
#include "link.h"
+struct dc_link *link_create(const struct link_init_data *init_params);
+void link_destroy(struct dc_link **link);
#endif /* __LINK_FACTORY_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_resource.h b/drivers/gpu/drm/amd/display/dc/link/link_resource.h
index 45554d30adf0..1907bda3cb6e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_resource.h
@@ -25,7 +25,8 @@
#ifndef __LINK_RESOURCE_H__
#define __LINK_RESOURCE_H__
#include "link.h"
+void link_get_cur_res_map(const struct dc *dc, uint32_t *map);
+void link_restore_res_map(const struct dc *dc, uint32_t *map);
void link_get_cur_link_res(const struct dc_link *link,
struct link_resource *link_res);
-
#endif /* __LINK_RESOURCE_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index d4f6ee6ca948..9a5010f86003 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -29,6 +29,7 @@
* provides helper functions exposing bandwidth formulas used in validation.
*/
#include "link_validation.h"
+#include "protocols/link_dp_capability.h"
#include "resource.h"
#define DC_LOGGER_INIT(logger)
@@ -123,7 +124,7 @@ static bool dp_active_dongle_validate_timing(
if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter
struct dc_crtc_timing outputTiming = *timing;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
if (timing->flags.DSC && !timing->dsc_cfg.is_frl)
/* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */
outputTiming.flags.DSC = 0;
@@ -233,7 +234,7 @@ uint32_t dp_link_bandwidth_kbps(
*/
link_rate_per_lane_kbps = link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE;
total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000;
- if (dc_link_should_enable_fec(link)) {
+ if (dp_should_enable_fec(link)) {
total_data_bw_efficiency_x10000 /= 100;
total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100;
}
@@ -254,19 +255,16 @@ uint32_t dp_link_bandwidth_kbps(
return link_rate_per_lane_kbps * link_settings->lane_count / 10000 * total_data_bw_efficiency_x10000;
}
-uint32_t link_timing_bandwidth_kbps(
- const struct dc_crtc_timing *timing)
+uint32_t link_timing_bandwidth_kbps(const struct dc_crtc_timing *timing)
{
uint32_t bits_per_channel = 0;
uint32_t kbps;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (timing->flags.DSC)
return dc_dsc_stream_bandwidth_in_kbps(timing,
timing->dsc_cfg.bits_per_pixel,
timing->dsc_cfg.num_slices_h,
timing->dsc_cfg.is_dp);
-#endif /* CONFIG_DRM_AMD_DC_DCN */
switch (timing->display_color_depth) {
case COLOR_DEPTH_666:
@@ -329,7 +327,7 @@ static bool dp_validate_mode_timing(
timing->v_addressable == (uint32_t) 480)
return true;
- link_setting = dc_link_get_link_cap(link);
+ link_setting = dp_get_verified_link_cap(link);
/* TODO: DYNAMIC_VALIDATION needs to be implemented */
/*if (flags.DYNAMIC_VALIDATION == 1 &&
@@ -338,7 +336,7 @@ static bool dp_validate_mode_timing(
*/
req_bw = dc_bandwidth_in_kbps_from_timing(timing);
- max_bw = dc_link_bandwidth_kbps(link, link_setting);
+ max_bw = dp_link_bandwidth_kbps(link, link_setting);
if (req_bw <= max_bw) {
/* remember the biggest mode here, during
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
index ab6a44f50032..2191d3a4950c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
@@ -25,4 +25,11 @@
#ifndef __LINK_VALIDATION_H__
#define __LINK_VALIDATION_H__
#include "link.h"
+enum dc_status link_validate_mode_timing(
+ const struct dc_stream_state *stream,
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing);
+uint32_t dp_link_bandwidth_kbps(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_settings);
#endif /* __LINK_VALIDATION_H__ */
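dp_link_bandwidth_kbps() derives the usable payload bandwidth from the raw lane rate and a data-efficiency factor. A minimal standalone sketch of the 8b/10b arithmetic, assuming the usual DC constants (27 MHz link-rate reference, 10 bits per DP byte, 80% efficiency, roughly 3% extra FEC overhead; treat the exact values as assumptions):

#include <stdio.h>
#include <stdint.h>

/* Assumed constants mirroring the DC 8b/10b path; values are illustrative. */
#define DEMO_LINK_RATE_REF_FREQ_IN_KHZ  27000  /* 27 MHz per link-rate unit */
#define DEMO_BITS_PER_DP_BYTE           10     /* 8b/10b line coding */
#define DEMO_EFFICIENCY_8B_10B_X10000   8000   /* 80.00% payload efficiency */
#define DEMO_FEC_EFFICIENCY_X100        97     /* ~3% additional FEC overhead */

static uint32_t demo_dp_bw_kbps(uint32_t link_rate, uint32_t lanes, int fec)
{
	uint64_t per_lane_kbps = (uint64_t)link_rate *
				 DEMO_LINK_RATE_REF_FREQ_IN_KHZ * DEMO_BITS_PER_DP_BYTE;
	uint64_t eff_x10000 = DEMO_EFFICIENCY_8B_10B_X10000;

	if (fec)
		eff_x10000 = eff_x10000 / 100 * DEMO_FEC_EFFICIENCY_X100;

	return per_lane_kbps * lanes / 10000 * eff_x10000;
}

int main(void)
{
	/* link_rate 20 is HBR2 in these units: 20 * 27 MHz * 10 = 5.4 Gbps per lane */
	printf("HBR2 x4, no FEC: %u kbps\n", (unsigned)demo_dp_bw_kbps(20, 4, 0)); /* 17280000 */
	printf("HBR2 x4, FEC:    %u kbps\n", (unsigned)demo_dp_bw_kbps(20, 4, 1)); /* 16761600 */
	return 0;
}

The same units explain why the validation path compares dc_bandwidth_in_kbps_from_timing() against dp_link_bandwidth_kbps(): both sides of the comparison are kbps of usable payload.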
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
index 5269125bc2a4..0fa1228bc178 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
@@ -53,7 +53,7 @@ struct aux_payloads {
struct vector payloads;
};
-static bool dal_ddc_i2c_payloads_create(
+static bool i2c_payloads_create(
struct dc_context *ctx,
struct i2c_payloads *payloads,
uint32_t count)
@@ -65,16 +65,24 @@ static bool dal_ddc_i2c_payloads_create(
return false;
}
-static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p)
+static struct i2c_payload *i2c_payloads_get(struct i2c_payloads *p)
{
return (struct i2c_payload *)p->payloads.container;
}
-static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
+static uint32_t i2c_payloads_get_count(struct i2c_payloads *p)
{
return p->payloads.count;
}
+static void i2c_payloads_destroy(struct i2c_payloads *p)
+{
+ if (!p)
+ return;
+
+ dal_vector_destruct(&p->payloads);
+}
+
#define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b))
static void i2c_payloads_add(
@@ -364,10 +372,10 @@ bool link_query_ddc_data(
struct i2c_command command = {0};
struct i2c_payloads payloads;
- if (!dal_ddc_i2c_payloads_create(ddc->ctx, &payloads, payloads_num))
+ if (!i2c_payloads_create(ddc->ctx, &payloads, payloads_num))
return false;
- command.payloads = dal_ddc_i2c_payloads_get(&payloads);
+ command.payloads = i2c_payloads_get(&payloads);
command.number_of_payloads = 0;
command.engine = DDC_I2C_COMMAND_ENGINE;
command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
@@ -379,20 +387,20 @@ bool link_query_ddc_data(
&payloads, address, read_size, read_buf, false);
command.number_of_payloads =
- dal_ddc_i2c_payloads_get_count(&payloads);
+ i2c_payloads_get_count(&payloads);
success = dm_helpers_submit_i2c(
ddc->ctx,
ddc->link,
&command);
- dal_vector_destruct(&payloads.payloads);
+ i2c_payloads_destroy(&payloads);
}
return success;
}
-int dc_link_aux_transfer_raw(struct ddc_service *ddc,
+int link_aux_transfer_raw(struct ddc_service *ddc,
struct aux_payload *payload,
enum aux_return_code_type *operation_result)
{
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
index aaa5064408ba..860ef15d7f1b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
@@ -37,13 +37,41 @@
#define EDID_SEGMENT_SIZE 256
+struct ddc_service *link_create_ddc_service(
+ struct ddc_service_init_data *ddc_init_data);
+
+void link_destroy_ddc_service(struct ddc_service **ddc);
+
void set_ddc_transaction_type(
struct ddc_service *ddc,
enum ddc_transaction_type type);
+uint32_t link_get_aux_defer_delay(struct ddc_service *ddc);
+
+bool link_is_in_aux_transaction_mode(struct ddc_service *ddc);
+
bool try_to_configure_aux_timeout(struct ddc_service *ddc,
uint32_t timeout);
+bool link_query_ddc_data(
+ struct ddc_service *ddc,
+ uint32_t address,
+ uint8_t *write_buf,
+ uint32_t write_size,
+ uint8_t *read_buf,
+ uint32_t read_size);
+
+/* Attempt to submit an aux payload, retrying on timeouts, defers, and busy
+ * states as outlined in the DP spec. Returns true if the request was
+ * successful.
+ *
+ * NOTE: The function requires an explicit mutex on the DM side in order to
+ * prevent a potential race condition. DC components should call the dpcd
+ * read/write functions in dm_helpers in order to access dpcd safely.
+ */
+bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc,
+ struct aux_payload *payload);
+
void write_scdc_data(
struct ddc_service *ddc_service,
uint32_t pix_clk,
@@ -57,5 +85,8 @@ void set_dongle_type(struct ddc_service *ddc,
struct ddc *get_ddc_pin(struct ddc_service *ddc_service);
+int link_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_return_code_type *operation_result);
#endif /* __DAL_DDC_SERVICE_H__ */
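The aux retry note above boils down to: keep resubmitting on defers and timeouts up to a bounded number of attempts, fail fast on hard errors, and leave locking to the caller. A minimal standalone sketch of that policy (the demo_* names and retry count are hypothetical, not the DC API):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical result codes standing in for the aux return codes. */
enum demo_aux_result { DEMO_AUX_ACK, DEMO_AUX_DEFER, DEMO_AUX_TIMEOUT, DEMO_AUX_FAIL };

#define DEMO_AUX_MAX_RETRIES 7

/* Caller is expected to hold whatever lock serializes aux access (the DM-side
 * mutex mentioned in the note above); this sketch only shows the retry policy. */
static bool demo_aux_transfer_with_retries(enum demo_aux_result (*submit)(void))
{
	int attempt;

	for (attempt = 0; attempt < DEMO_AUX_MAX_RETRIES; attempt++) {
		switch (submit()) {
		case DEMO_AUX_ACK:
			return true;            /* request went through */
		case DEMO_AUX_DEFER:
		case DEMO_AUX_TIMEOUT:
			continue;               /* sink busy, try again */
		case DEMO_AUX_FAIL:
			return false;           /* hard failure, give up */
		}
	}
	return false;                           /* retries exhausted */
}

static enum demo_aux_result demo_submit(void)
{
	static int calls;

	return ++calls < 3 ? DEMO_AUX_DEFER : DEMO_AUX_ACK;
}

int main(void)
{
	printf("aux transfer %s\n",
	       demo_aux_transfer_with_retries(demo_submit) ? "succeeded" : "failed");
	return 0;
}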
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index d4370856f164..e9bcb35ae185 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -42,6 +42,8 @@
#include "link_edp_panel_control.h"
#include "link_dp_irq_handler.h"
#include "link/accessories/link_dp_trace.h"
+#include "link/link_detection.h"
+#include "link/link_validation.h"
#include "link_dp_training.h"
#include "atomfirmware.h"
#include "resource.h"
@@ -155,7 +157,7 @@ uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count)
return 0; // invalid value
}
-uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw)
+uint32_t link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw)
{
switch (bw) {
case 0b001:
@@ -278,7 +280,7 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
if (!link->dpcd_caps.dpcd_rev.raw) {
do {
- dc_link_dp_receiver_power_ctrl(link, true);
+ dpcd_write_rx_power_ctrl(link, true);
core_link_read_dpcd(link, DP_DPCD_REV,
dpcd_data, length);
link->dpcd_caps.dpcd_rev.raw = dpcd_data[
@@ -309,7 +311,7 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
link->wa_flags.dp_keep_receiver_powered = false;
}
-bool dc_link_is_fec_supported(const struct dc_link *link)
+bool dp_is_fec_supported(const struct dc_link *link)
{
/* TODO - use asic cap instead of link_enc->features
* we no longer know which link enc to use for this link before commit
@@ -325,7 +327,7 @@ bool dc_link_is_fec_supported(const struct dc_link *link)
!IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment));
}
-bool dc_link_should_enable_fec(const struct dc_link *link)
+bool dp_should_enable_fec(const struct dc_link *link)
{
bool force_disable = false;
@@ -342,10 +344,10 @@ bool dc_link_should_enable_fec(const struct dc_link *link)
|| !link->dc->caps.edp_dsc_support))
force_disable = true;
- return !force_disable && dc_link_is_fec_supported(link);
+ return !force_disable && dp_is_fec_supported(link);
}
-bool link_is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx)
+bool dp_is_128b_132b_signal(struct pipe_ctx *pipe_ctx)
{
/* If this assert is hit then we have a link encoder dynamic management issue */
ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true);
@@ -645,7 +647,7 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
initial_link_setting;
uint32_t link_bw;
- if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
+ if (req_bw > dp_link_bandwidth_kbps(link, &link->verified_link_cap))
return false;
/* search for the minimum link setting that:
@@ -654,7 +656,7 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
*/
while (current_link_setting.link_rate <=
link->verified_link_cap.link_rate) {
- link_bw = dc_link_bandwidth_kbps(
+ link_bw = dp_link_bandwidth_kbps(
link,
&current_link_setting);
if (req_bw <= link_bw) {
@@ -679,7 +681,8 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
return false;
}
-bool dc_link_decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
+bool edp_decide_link_settings(struct dc_link *link,
+ struct dc_link_settings *link_setting, uint32_t req_bw)
{
struct dc_link_settings initial_link_setting;
struct dc_link_settings current_link_setting;
@@ -709,7 +712,7 @@ bool dc_link_decide_edp_link_settings(struct dc_link *link, struct dc_link_setti
*/
while (current_link_setting.link_rate <=
link->verified_link_cap.link_rate) {
- link_bw = dc_link_bandwidth_kbps(
+ link_bw = dp_link_bandwidth_kbps(
link,
&current_link_setting);
if (req_bw <= link_bw) {
@@ -764,7 +767,7 @@ bool decide_edp_link_settings_with_dsc(struct dc_link *link,
initial_link_setting.use_link_rate_set = false;
initial_link_setting.link_rate_set = 0;
current_link_setting = initial_link_setting;
- if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
+ if (req_bw > dp_link_bandwidth_kbps(link, &link->verified_link_cap))
return false;
/* search for the minimum link setting that:
@@ -773,7 +776,7 @@ bool decide_edp_link_settings_with_dsc(struct dc_link *link,
*/
while (current_link_setting.link_rate <=
max_link_rate) {
- link_bw = dc_link_bandwidth_kbps(
+ link_bw = dp_link_bandwidth_kbps(
link,
&current_link_setting);
if (req_bw <= link_bw) {
@@ -830,7 +833,7 @@ bool decide_edp_link_settings_with_dsc(struct dc_link *link,
*/
while (current_link_setting.link_rate <=
max_link_rate) {
- link_bw = dc_link_bandwidth_kbps(
+ link_bw = dp_link_bandwidth_kbps(
link,
&current_link_setting);
if (req_bw <= link_bw) {
@@ -922,12 +925,12 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
tmp_link_setting.link_rate = LINK_RATE_UNKNOWN;
tmp_timing.flags.DSC = 0;
orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing);
- dc_link_decide_edp_link_settings(link, &tmp_link_setting, orig_req_bw);
+ edp_decide_link_settings(link, &tmp_link_setting, orig_req_bw);
max_link_rate = tmp_link_setting.link_rate;
}
decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate);
} else {
- dc_link_decide_edp_link_settings(link, link_setting, req_bw);
+ edp_decide_link_settings(link, link_setting, req_bw);
}
} else {
decide_dp_link_settings(link, link_setting, req_bw);
@@ -948,7 +951,7 @@ enum dp_link_encoding link_dp_get_encoding_format(const struct dc_link_settings
return DP_UNKNOWN_ENCODING;
}
-enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link)
+enum dp_link_encoding mst_decide_link_encoding_format(const struct dc_link *link)
{
struct dc_link_settings link_settings = {0};
@@ -1005,7 +1008,7 @@ static enum dc_status wake_up_aux_channel(struct dc_link *link)
* signal and may need up to 1 ms before being able to reply.
*/
if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3) {
- udelay(1000);
+ fsleep(1000);
aux_channel_retry_cnt++;
}
}
@@ -1121,7 +1124,7 @@ static void get_active_converter_info(
union hdmi_encoded_link_bw hdmi_encoded_link_bw;
link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps =
- dc_link_bw_kbps_from_raw_frl_link_rate_data(
+ link_bw_kbps_from_raw_frl_link_rate_data(
hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT);
// Intersect reported max link bw support with the supported link rate post FRL link training
@@ -1216,7 +1219,7 @@ static void apply_usbc_combo_phy_reset_wa(struct dc_link *link,
dp_disable_link_phy(link, &link_res, link->connector_signal);
}
-static bool dp_overwrite_extended_receiver_cap(struct dc_link *link)
+bool dp_overwrite_extended_receiver_cap(struct dc_link *link)
{
uint8_t dpcd_data[16];
uint32_t read_dpcd_retry_cnt = 3;
@@ -1278,12 +1281,6 @@ static bool dp_overwrite_extended_receiver_cap(struct dc_link *link)
return true;
}
-void dc_link_overwrite_extended_receiver_cap(
- struct dc_link *link)
-{
- dp_overwrite_extended_receiver_cap(link);
-}
-
void dpcd_set_source_specific_data(struct dc_link *link)
{
if (!link->dc->vendor_signature.is_valid) {
@@ -1972,7 +1969,7 @@ void detect_edp_sink_caps(struct dc_link *link)
sizeof(link->dpcd_caps.alpm_caps.raw));
}
-bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
+bool dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
{
struct link_encoder *link_enc = NULL;
@@ -1995,7 +1992,7 @@ bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_
return false;
}
-const struct dc_link_settings *dc_link_get_link_cap(
+const struct dc_link_settings *dp_get_verified_link_cap(
const struct dc_link *link)
{
if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
@@ -2121,9 +2118,9 @@ static bool dp_verify_link_cap(
if (status == LINK_TRAINING_SUCCESS) {
success = true;
- udelay(1000);
- if (dc_link_dp_read_hpd_rx_irq_data(link, &irq_data) == DC_OK &&
- dc_link_check_link_loss_status(
+ fsleep(1000);
+ if (dp_read_hpd_rx_irq_data(link, &irq_data) == DC_OK &&
+ dp_parse_link_loss_status(
link,
&irq_data))
(*fail_count)++;
@@ -2163,7 +2160,7 @@ bool dp_verify_link_cap_with_retries(
memset(&link->verified_link_cap, 0,
sizeof(struct dc_link_settings));
- if (!dc_link_detect_connection_type(link, &type) || type == dc_connection_none) {
+ if (!link_detect_connection_type(link, &type) || type == dc_connection_none) {
link->verified_link_cap = fail_safe_link_settings;
break;
} else if (dp_verify_link_cap(link, known_limit_link_setting,
@@ -2171,7 +2168,7 @@ bool dp_verify_link_cap_with_retries(
success = true;
break;
}
- msleep(10);
+ fsleep(10 * 1000);
}
dp_trace_lt_fail_count_update(link, fail_count, true);
@@ -2181,10 +2178,9 @@ bool dp_verify_link_cap_with_retries(
}
/**
- * dc_link_is_dp_sink_present() - Check if there is a native DP
- * or passive DP-HDMI dongle connected
+ * Check if there is a native DP or passive DP-HDMI dongle connected
*/
-bool dc_link_is_dp_sink_present(struct dc_link *link)
+bool dp_is_sink_present(struct dc_link *link)
{
enum gpio_result gpio_result;
uint32_t clock_pin = 0;
@@ -2231,7 +2227,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
ASSERT(gpio_result == GPIO_RESULT_OK);
if (clock_pin)
- udelay(1000);
+ fsleep(1000);
else
break;
} while (retry++ < 3);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
index f79e4a4a9db6..8f0ce97f2362 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
@@ -34,32 +34,56 @@ void detect_edp_sink_caps(struct dc_link *link);
struct dc_link_settings dp_get_max_link_cap(struct dc_link *link);
+bool dp_get_max_link_enc_cap(const struct dc_link *link,
+ struct dc_link_settings *max_link_enc_cap);
+
+const struct dc_link_settings *dp_get_verified_link_cap(
+ const struct dc_link *link);
+
+enum dp_link_encoding link_dp_get_encoding_format(
+ const struct dc_link_settings *link_settings);
enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link);
/* Convert PHY repeater count read from DPCD uint8_t. */
uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count);
+bool dp_is_sink_present(struct dc_link *link);
+
bool dp_is_lttpr_present(struct dc_link *link);
+bool dp_is_fec_supported(const struct dc_link *link);
+
bool is_dp_active_dongle(const struct dc_link *link);
bool is_dp_branch_device(const struct dc_link *link);
void dpcd_write_cable_id_to_dprx(struct dc_link *link);
+bool dp_should_enable_fec(const struct dc_link *link);
+
+bool dp_is_128b_132b_signal(struct pipe_ctx *pipe_ctx);
+
/* Initialize output parameter lt_settings. */
void dp_decide_training_settings(
struct dc_link *link,
const struct dc_link_settings *link_setting,
struct link_training_settings *lt_settings);
+bool link_decide_link_settings(
+ struct dc_stream_state *stream,
+ struct dc_link_settings *link_setting);
+
+bool edp_decide_link_settings(struct dc_link *link,
+ struct dc_link_settings *link_setting, uint32_t req_bw);
bool decide_edp_link_settings_with_dsc(struct dc_link *link,
struct dc_link_settings *link_setting,
uint32_t req_bw,
enum dc_link_rate max_link_rate);
+enum dp_link_encoding mst_decide_link_encoding_format(const struct dc_link *link);
+
void dpcd_set_source_specific_data(struct dc_link *link);
/*query dpcd for version and mst cap addresses*/
@@ -76,4 +100,8 @@ bool dp_verify_link_cap_with_retries(
struct dc_link_settings *known_limit_link_setting,
int attempts);
+uint32_t link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw);
+
+bool dp_overwrite_extended_receiver_cap(struct dc_link *link);
+
#endif /* __DC_LINK_DP_CAPABILITY_H__ */
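link_decide_link_settings() and edp_decide_link_settings() both search for the minimum link configuration whose bandwidth still covers the requested stream bandwidth, stepping through candidate settings up to the verified cap (roughly: more lanes at the current rate are tried before moving to a higher rate; treat the exact search order as an assumption here). A standalone sketch of that search, reusing the 8b/10b arithmetic from the earlier sketch (demo_* names and the rate table are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Hypothetical candidate link rates in 27 MHz units: RBR, HBR, HBR2, HBR3. */
static const uint32_t demo_rates[] = { 6, 10, 20, 30 };

static uint32_t demo_bw_kbps(uint32_t rate, uint32_t lanes)
{
	/* same 8b/10b payload-bandwidth arithmetic as the validation sketch */
	return (uint64_t)rate * 27000 * 10 * lanes / 10000 * 8000;
}

/* Pick the lowest rate, and the fewest lanes at that rate, that still
 * satisfies req_bw; fail if even the maximum setting cannot carry it. */
static bool demo_decide_link_settings(uint32_t req_bw, uint32_t max_rate,
				      uint32_t max_lanes,
				      uint32_t *out_rate, uint32_t *out_lanes)
{
	unsigned int i;
	uint32_t lanes;

	for (i = 0; i < sizeof(demo_rates) / sizeof(demo_rates[0]); i++) {
		if (demo_rates[i] > max_rate)
			break;
		for (lanes = 1; lanes <= max_lanes; lanes *= 2) {
			if (demo_bw_kbps(demo_rates[i], lanes) >= req_bw) {
				*out_rate = demo_rates[i];
				*out_lanes = lanes;
				return true;
			}
		}
	}
	return false;
}

int main(void)
{
	uint32_t rate, lanes;

	/* ~8.1 Gbps stream against a link verified up to HBR3 x4 */
	if (demo_decide_link_settings(8100000, 30, 4, &rate, &lanes))
		printf("chosen: rate unit %u, %u lane(s)\n", (unsigned)rate, (unsigned)lanes);
	return 0;
}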
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
index 32f48a48e9dd..4626fabc0a96 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
@@ -26,7 +26,6 @@
#include "dc.h"
#include "inc/core_status.h"
-#include "dc_link.h"
#include "dpcd_defs.h"
#include "link_dp_dpia.h"
@@ -79,7 +78,7 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
return status;
}
-bool dc_link_dpia_query_hpd_status(struct dc_link *link)
+bool dpia_query_hpd_status(struct dc_link *link)
{
union dmub_rb_cmd cmd = {0};
struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
index 98935cc10bb7..363f45a1a964 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
@@ -37,7 +37,5 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);
/* Query hot plug status of USB4 DP tunnel.
* Returns true if HPD high.
*/
-bool dc_link_dpia_query_hpd_status(struct dc_link *link);
-
-
+bool dpia_query_hpd_status(struct dc_link *link);
#endif /* __DC_LINK_DPIA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index f69e681b3b5b..931f7c6446de 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -26,12 +26,17 @@
/*********************************************************************/
// USB4 DPIA BANDWIDTH ALLOCATION LOGIC
/*********************************************************************/
-#include "dc.h"
-#include "dc_link.h"
#include "link_dp_dpia_bw.h"
-#include "drm_dp_helper_dc.h"
#include "link_dpcd.h"
+#include "dc_dmub_srv.h"
+#define DC_LOGGER \
+ link->ctx->logger
+
+/* Number of Host Routers per motherboard is 2 */
+#define MAX_HR_NUM 2
+/* Number of DPIA per host router is 2 */
+#define MAX_DPIA_NUM (MAX_HR_NUM * 2)
#define Kbps_TO_Gbps (1000 * 1000)
// ------------------------------------------------------------------
@@ -84,12 +89,11 @@ static int get_estimated_bw(struct dc_link *link)
{
uint8_t bw_estimated_bw = 0;
- if (core_link_read_dpcd(
- link,
- ESTIMATED_BW,
- &bw_estimated_bw,
- sizeof(uint8_t)) != DC_OK)
- dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n", __func__, ESTIMATED_BW);
+ core_link_read_dpcd(
+ link,
+ ESTIMATED_BW,
+ &bw_estimated_bw,
+ sizeof(uint8_t));
return bw_estimated_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
}
@@ -133,8 +137,9 @@ static uint8_t get_lowest_dpia_index(struct dc_link *link)
{
const struct dc *dc_struct = link->dc;
uint8_t idx = 0xFF;
+ int i;
- for (int i = 0; i < MAX_PIPES * 2; ++i) {
+ for (i = 0; i < MAX_PIPES * 2; ++i) {
if (!dc_struct->links[i] ||
dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
@@ -161,8 +166,9 @@ static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
uint8_t idx = (link->link_index - lowest_dpia_index) / 2, idx_temp = 0;
struct dc_link *link_temp;
int total_bw = 0;
+ int i;
- for (int i = 0; i < MAX_PIPES * 2; ++i) {
+ for (i = 0; i < MAX_PIPES * 2; ++i) {
if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
continue;
@@ -194,15 +200,13 @@ static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
*/
static bool dpia_bw_alloc_unplug(struct dc_link *link)
{
- bool ret = false;
-
if (!link)
return true;
return deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
link->dpia_bw_alloc_config.sink_allocated_bw, link);
}
-static void dc_link_set_usb4_req_bw_req(struct dc_link *link, int req_bw)
+static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)
{
uint8_t requested_bw;
uint32_t temp;
@@ -227,9 +231,7 @@ static void dc_link_set_usb4_req_bw_req(struct dc_link *link, int req_bw)
link,
REQUESTED_BW,
&requested_bw,
- sizeof(uint8_t)) != DC_OK)
- dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n", __func__, REQUESTED_BW);
- else
+ sizeof(uint8_t)) == DC_OK)
link->dpia_bw_alloc_config.response_ready = false; // Reset flag
}
/*
@@ -246,7 +248,7 @@ static bool get_cm_response_ready_flag(struct dc_link *link)
// ------------------------------------------------------------------
// PUBLIC FUNCTIONS
// ------------------------------------------------------------------
-bool set_dptx_usb4_bw_alloc_support(struct dc_link *link)
+bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
{
bool ret = false;
uint8_t response = 0,
@@ -257,22 +259,18 @@ bool set_dptx_usb4_bw_alloc_support(struct dc_link *link)
goto out;
if (core_link_read_dpcd(
- link,
- DP_TUNNELING_CAPABILITIES,
- &response,
- sizeof(uint8_t)) != DC_OK)
- dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n", __func__, DP_TUNNELING_CAPABILITIES);
-
- bw_support_dpia = (response >> 7) & 1;
+ link,
+ DP_TUNNELING_CAPABILITIES,
+ &response,
+ sizeof(uint8_t)) == DC_OK)
+ bw_support_dpia = (response >> 7) & 1;
if (core_link_read_dpcd(
link,
USB4_DRIVER_BW_CAPABILITY,
&response,
- sizeof(uint8_t)) != DC_OK)
- dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n", __func__, DP_TUNNELING_CAPABILITIES);
-
- bw_support_cm = (response >> 7) & 1;
+ sizeof(uint8_t)) == DC_OK)
+ bw_support_cm = (response >> 7) & 1;
/* Send request acknowledgment to Turn ON DPTX support */
if (bw_support_cm && bw_support_dpia) {
@@ -282,15 +280,14 @@ bool set_dptx_usb4_bw_alloc_support(struct dc_link *link)
link,
DPTX_BW_ALLOCATION_MODE_CONTROL,
&response,
- sizeof(uint8_t)) != DC_OK)
- dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n",
- "**** FAILURE Enabling DPtx BW Allocation Mode Support ***\n",
- __func__, DP_TUNNELING_CAPABILITIES);
- else {
-
+ sizeof(uint8_t)) != DC_OK) {
+ DC_LOG_DEBUG("%s: **** FAILURE Enabling DPtx BW Allocation Mode Support ***\n",
+ __func__);
+ } else {
// SUCCESS Enabled DPtx BW Allocation Mode Support
link->dpia_bw_alloc_config.bw_alloc_enabled = true;
- dm_output_to_console("**** SUCCESS Enabling DPtx BW Allocation Mode Support ***\n");
+ DC_LOG_DEBUG("%s: **** SUCCESS Enabling DPtx BW Allocation Mode Support ***\n",
+ __func__);
ret = true;
init_usb4_bw_struct(link);
@@ -300,8 +297,12 @@ bool set_dptx_usb4_bw_alloc_support(struct dc_link *link)
out:
return ret;
}
-void dc_link_get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t result)
+void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result)
{
+ int bw_needed = 0;
+ int estimated = 0;
+ int host_router_total_estimated_bw = 0;
+
if (!get_bw_alloc_proceed_flag((link)))
return;
@@ -309,13 +310,13 @@ void dc_link_get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t resu
case DPIA_BW_REQ_FAILED:
- dm_output_to_console("%s: *** *** BW REQ FAILURE for DP-TX Request *** ***\n", __func__);
+ DC_LOG_DEBUG("%s: *** *** BW REQ FAILURE for DP-TX Request *** ***\n", __func__);
// Update the new Estimated BW value updated by CM
link->dpia_bw_alloc_config.estimated_bw =
bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
- dc_link_set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.estimated_bw);
+ set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.estimated_bw);
link->dpia_bw_alloc_config.response_ready = false;
/*
@@ -329,18 +330,18 @@ void dc_link_get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t resu
case DPIA_BW_REQ_SUCCESS:
- dm_output_to_console("%s: *** BW REQ SUCCESS for DP-TX Request ***\n", __func__);
+ DC_LOG_DEBUG("%s: *** BW REQ SUCCESS for DP-TX Request ***\n", __func__);
// 1. SUCCESS 1st time before any Pruning is done
// 2. SUCCESS after prev. FAIL before any Pruning is done
// 3. SUCCESS after Pruning is done but before enabling link
- int needed = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+ bw_needed = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
// 1.
if (!link->dpia_bw_alloc_config.sink_allocated_bw) {
- allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, needed, link);
+ allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, bw_needed, link);
link->dpia_bw_alloc_config.sink_verified_bw =
link->dpia_bw_alloc_config.sink_allocated_bw;
@@ -354,12 +355,12 @@ void dc_link_get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t resu
else if (link->dpia_bw_alloc_config.sink_allocated_bw) {
// Find out how much do we need to de-alloc
- if (link->dpia_bw_alloc_config.sink_allocated_bw > needed)
+ if (link->dpia_bw_alloc_config.sink_allocated_bw > bw_needed)
deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
- link->dpia_bw_alloc_config.sink_allocated_bw - needed, link);
+ link->dpia_bw_alloc_config.sink_allocated_bw - bw_needed, link);
else
allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
- needed - link->dpia_bw_alloc_config.sink_allocated_bw, link);
+ bw_needed - link->dpia_bw_alloc_config.sink_allocated_bw, link);
}
// 4. If this is the 2nd sink then any unused bw will be reallocated to master DPIA
@@ -370,27 +371,20 @@ void dc_link_get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t resu
case DPIA_EST_BW_CHANGED:
- dm_output_to_console("%s: *** ESTIMATED BW CHANGED for DP-TX Request ***\n", __func__);
+ DC_LOG_DEBUG("%s: *** ESTIMATED BW CHANGED for DP-TX Request ***\n", __func__);
- int available = 0, estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
- int host_router_total_estimated_bw = get_host_router_total_bw(link, HOST_ROUTER_BW_ESTIMATED);
+ estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+ host_router_total_estimated_bw = get_host_router_total_bw(link, HOST_ROUTER_BW_ESTIMATED);
// 1. If due to unplug of other sink
if (estimated == host_router_total_estimated_bw) {
-
// First update the estimated & max_bw fields
if (link->dpia_bw_alloc_config.estimated_bw < estimated) {
- available = estimated - link->dpia_bw_alloc_config.estimated_bw;
link->dpia_bw_alloc_config.estimated_bw = estimated;
}
}
// 2. If due to realloc bw btw 2 dpia due to plug OR realloc unused Bw
else {
-
- // We took from another unplugged/problematic sink to give to us
- if (link->dpia_bw_alloc_config.estimated_bw < estimated)
- available = estimated - link->dpia_bw_alloc_config.estimated_bw;
-
// We lost estimated bw usually due to plug event of other dpia
link->dpia_bw_alloc_config.estimated_bw = estimated;
}
@@ -398,12 +392,12 @@ void dc_link_get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t resu
case DPIA_BW_ALLOC_CAPS_CHANGED:
- dm_output_to_console("%s: *** BW ALLOC CAPABILITY CHANGED for DP-TX Request ***\n", __func__);
+ DC_LOG_DEBUG("%s: *** BW ALLOC CAPABILITY CHANGED for DP-TX Request ***\n", __func__);
link->dpia_bw_alloc_config.bw_alloc_enabled = false;
break;
}
}
-int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw)
+int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw)
{
int ret = 0;
uint8_t timeout = 10;
@@ -417,14 +411,14 @@ int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *li
// If DP over USB4 then we need to check BW allocation
link->dpia_bw_alloc_config.sink_max_bw = peak_bw;
- dc_link_set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.sink_max_bw);
+ set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.sink_max_bw);
do {
- if (!timeout > 0)
+ if (!(timeout > 0))
timeout--;
else
break;
- udelay(10 * 1000);
+ fsleep(10 * 1000);
} while (!get_cm_response_ready_flag(link));
if (!timeout)
@@ -439,3 +433,65 @@ int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *li
out:
return ret;
}
+int link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
+{
+ int ret = 0;
+ uint8_t timeout = 10;
+
+ if (!get_bw_alloc_proceed_flag(link))
+ goto out;
+
+ /*
+ * Sometimes the stream uses the same timing parameters as the already
+ * allocated max sink bw, so there is no need to re-allocate.
+ */
+ if (req_bw != link->dpia_bw_alloc_config.sink_allocated_bw) {
+ set_usb4_req_bw_req(link, req_bw);
+ do {
+ if (!(timeout > 0))
+ timeout--;
+ else
+ break;
+ udelay(10 * 1000);
+ } while (!get_cm_response_ready_flag(link));
+
+ if (!timeout)
+ ret = 0;// ERROR TIMEOUT waiting for response for allocating bw
+ else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0)
+ ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED);
+ }
+
+out:
+ return ret;
+}
+bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, uint8_t num_dpias)
+{
+ bool ret = true;
+ int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 };
+ uint8_t lowest_dpia_index = 0, dpia_index = 0;
+ uint8_t i;
+
+ if (!num_dpias || num_dpias > MAX_DPIA_NUM)
+ return ret;
+
+ //Get total Host Router BW & Validate against each Host Router max BW
+ for (i = 0; i < num_dpias; ++i) {
+
+ if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled)
+ continue;
+
+ lowest_dpia_index = get_lowest_dpia_index(link[i]);
+ if (link[i]->link_index < lowest_dpia_index)
+ continue;
+
+ dpia_index = (link[i]->link_index - lowest_dpia_index) / 2;
+ bw_needed_per_hr[dpia_index] += bw_needed_per_dpia[i];
+ if (bw_needed_per_hr[dpia_index] > get_host_router_total_bw(link[i], HOST_ROUTER_BW_ALLOCATED)) {
+
+ ret = false;
+ break;
+ }
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
index c2c3049adcd1..382616c8b698 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
@@ -26,6 +26,8 @@
#ifndef DC_INC_LINK_DP_DPIA_BW_H_
#define DC_INC_LINK_DP_DPIA_BW_H_
+#include "link.h"
+
/*
* Host Router BW type
*/
@@ -42,6 +44,54 @@ enum bw_type {
*
* return: SUCCESS or FAILURE
*/
-bool set_dptx_usb4_bw_alloc_support(struct dc_link *link);
+bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link);
+
+/*
+ * Allocate only what the stream needs for bw:
+ * if (stream_req_bw < or > already_allocated_bw_at_HPD)
+ * => deallocate the max bw and then allocate only what the stream needs
+ *
+ * @link: pointer to the dc_link struct instance
+ * @req_bw: Bw requested by the stream
+ *
+ * return: allocated bw else return 0
+ */
+int link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw);
+
+/*
+ * Handle the USB4 BW Allocation related functionality here:
+ * Plug => Try to allocate max bw from timing parameters supported by the sink
+ * Unplug => de-allocate bw
+ *
+ * @link: pointer to the dc_link struct instance
+ * @peak_bw: Peak bw used by the link/sink
+ *
+ * return: allocated bw else return 0
+ */
+int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw);
+
+/*
+ * Handle function for when the status of the Request above is complete.
+ * We will find out the result of allocating on CM and update structs.
+ *
+ * @link: pointer to the dc_link struct instance
+ * @bw: Allocated or Estimated BW depending on the result
+ * @result: Response type
+ *
+ * return: none
+ */
+void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result);
+
+/*
+ * Handle the validation of total BW here and confirm that the bw used by each
+ * DPIA doesn't exceed available BW for each host router (HR)
+ *
+ * @link[]: array of link pointer to all possible DPIA links
+ * @bw_needed[]: bw needed for each DPIA link based on timing
+ * @num_dpias: Number of DPIAs for the above 2 arrays. Should always be <= MAX_DPIA_NUM
+ *
+ * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
+ */
+bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, uint8_t num_dpias);
#endif /* DC_INC_LINK_DP_DPIA_BW_H_ */
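dpia_validate_usb4_bw() above sums the bandwidth needed by each DPIA per host router (two DPIAs per host router in this code) and rejects the configuration if any host router's budget is exceeded. A standalone sketch of that grouping and check (the demo_* names and the sample numbers are made up):

#include <stdio.h>
#include <stdbool.h>

#define DEMO_MAX_HR_NUM   2                 /* host routers per board (assumed) */
#define DEMO_DPIA_PER_HR  2                 /* DPIAs per host router (assumed) */

/* Validate that the bandwidth needed by each DPIA, summed per host router,
 * stays within that host router's available bandwidth. */
static bool demo_validate_usb4_bw(const int *bw_needed_per_dpia,
				  const int *hr_available_bw,
				  unsigned int num_dpias)
{
	int bw_needed_per_hr[DEMO_MAX_HR_NUM] = { 0, 0 };
	unsigned int i;

	if (num_dpias > DEMO_MAX_HR_NUM * DEMO_DPIA_PER_HR)
		return false;

	for (i = 0; i < num_dpias; i++) {
		unsigned int hr = i / DEMO_DPIA_PER_HR;   /* host router owning this DPIA */

		bw_needed_per_hr[hr] += bw_needed_per_dpia[i];
		if (bw_needed_per_hr[hr] > hr_available_bw[hr])
			return false;
	}
	return true;
}

int main(void)
{
	int needed[4] = { 8100000, 8100000, 2700000, 0 };    /* kbps per DPIA (made up) */
	int avail[DEMO_MAX_HR_NUM] = { 20000000, 20000000 }; /* kbps per host router (made up) */

	printf("config %s\n", demo_validate_usb4_bw(needed, avail, 4) ? "fits" : "does not fit");
	return 0;
}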
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index 9d80427520cf..ba95facc4ee8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -33,13 +33,14 @@
#include "link_dpcd.h"
#include "link_dp_training.h"
#include "link_dp_capability.h"
+#include "link_edp_panel_control.h"
#include "link/accessories/link_dp_trace.h"
#include "link/link_dpms.h"
#include "dm_helpers.h"
#define DC_LOGGER_INIT(logger)
-bool dc_link_check_link_loss_status(
+bool dp_parse_link_loss_status(
struct dc_link *link,
union hpd_irq_data *hpd_irq_dpcd_data)
{
@@ -155,9 +156,9 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
/* PSR error, disable and re-enable PSR */
if (link->psr_settings.psr_allow_active) {
allow_active = false;
- dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
+ edp_set_psr_allow_active(link, &allow_active, true, false, NULL);
allow_active = true;
- dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
+ edp_set_psr_allow_active(link, &allow_active, true, false, NULL);
}
return true;
@@ -174,7 +175,7 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
return false;
}
-void dc_link_dp_handle_link_loss(struct dc_link *link)
+void dp_handle_link_loss(struct dc_link *link)
{
struct pipe_ctx *pipes[MAX_PIPES];
struct dc_state *state = link->dc->current_state;
@@ -200,7 +201,7 @@ void dc_link_dp_handle_link_loss(struct dc_link *link)
}
}
-enum dc_status dc_link_dp_read_hpd_rx_irq_data(
+enum dc_status dp_read_hpd_rx_irq_data(
struct dc_link *link,
union hpd_irq_data *irq_data)
{
@@ -247,7 +248,7 @@ enum dc_status dc_link_dp_read_hpd_rx_irq_data(
}
/*************************Short Pulse IRQ***************************/
-bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link)
+bool dp_should_allow_hpd_rx_irq(const struct dc_link *link)
{
/*
* Don't handle RX IRQ unless one of following is met:
@@ -262,8 +263,9 @@ bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link)
return false;
}
-bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
- bool defer_handling, bool *has_left_work)
+bool dp_handle_hpd_rx_irq(struct dc_link *link,
+ union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
+ bool defer_handling, bool *has_left_work)
{
union hpd_irq_data hpd_irq_dpcd_data = {0};
union device_service_irq device_service_clear = {0};
@@ -288,7 +290,7 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
* dal_dpsst_ls_read_hpd_irq_data
* Order of calls is important too
*/
- result = dc_link_dp_read_hpd_rx_irq_data(link, &hpd_irq_dpcd_data);
+ result = dp_read_hpd_rx_irq_data(link, &hpd_irq_dpcd_data);
if (out_hpd_irq_dpcd_data)
*out_hpd_irq_dpcd_data = hpd_irq_dpcd_data;
@@ -315,7 +317,7 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
return false;
}
- if (!dc_link_dp_allow_hpd_rx_irq(link)) {
+ if (!dp_should_allow_hpd_rx_irq(link)) {
DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n",
__func__, link->link_index);
return false;
@@ -348,9 +350,9 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
* then DM should call DC to do the detection.
* NOTE: Do not handle link loss on eDP since it is internal link*/
if ((link->connector_signal != SIGNAL_TYPE_EDP) &&
- dc_link_check_link_loss_status(
- link,
- &hpd_irq_dpcd_data)) {
+ dp_parse_link_loss_status(
+ link,
+ &hpd_irq_dpcd_data)) {
/* Connectivity log: link loss */
CONN_DATA_LINK_LOSS(link,
hpd_irq_dpcd_data.raw,
@@ -360,7 +362,7 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
if (defer_handling && has_left_work)
*has_left_work = true;
else
- dc_link_dp_handle_link_loss(link);
+ dp_handle_link_loss(link);
status = false;
if (out_link_loss)
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
index 39b2e51ea79d..ac33730fedd4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
@@ -27,5 +27,15 @@
#define __DC_LINK_DP_IRQ_HANDLER_H__
#include "link.h"
-
+bool dp_parse_link_loss_status(
+ struct dc_link *link,
+ union hpd_irq_data *hpd_irq_dpcd_data);
+bool dp_should_allow_hpd_rx_irq(const struct dc_link *link);
+void dp_handle_link_loss(struct dc_link *link);
+enum dc_status dp_read_hpd_rx_irq_data(
+ struct dc_link *link,
+ union hpd_irq_data *irq_data);
+bool dp_handle_hpd_rx_irq(struct dc_link *link,
+ union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
+ bool defer_handling, bool *has_left_work);
#endif /* __DC_LINK_DP_IRQ_HANDLER_H__ */
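dp_handle_hpd_rx_irq() reads the short-pulse DPCD data first, bails out early when RX IRQ handling is not allowed, and only then checks for link loss, either deferring the retrain to the caller or handling it immediately. A stripped-down sketch of that ordering (hypothetical types and return convention; the real handler also services PSR errors, automated test requests and MST sideband messages):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical snapshot of what the short-pulse handler looks at. */
struct demo_hpd_rx_irq {
	bool read_ok;        /* DPCD IRQ vector read succeeded */
	bool allow_rx_irq;   /* link is trained or is an MST/DPIA link */
	bool link_loss;      /* lane status bits indicate link loss */
	bool defer_handling; /* caller wants to retrain later, not in IRQ context */
};

/* Returns true when the link loss should be handled right away. */
static bool demo_handle_hpd_rx_irq(const struct demo_hpd_rx_irq *irq,
				   bool *has_left_work)
{
	*has_left_work = false;

	if (!irq->read_ok || !irq->allow_rx_irq)
		return false;            /* nothing we can or should act on */

	if (!irq->link_loss)
		return false;            /* short pulse, but the link is still up */

	if (irq->defer_handling) {
		*has_left_work = true;   /* note the pending work, handle outside IRQ */
		return false;
	}
	return true;                     /* handle the link loss now */
}

int main(void)
{
	struct demo_hpd_rx_irq irq = { true, true, true, true };
	bool left_work;

	demo_handle_hpd_rx_irq(&irq, &left_work);
	printf("deferred work pending: %s\n", left_work ? "yes" : "no");
	return 0;
}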
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
index cd9fb8126bcf..b7abba55bc2f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
@@ -40,7 +40,7 @@
#define DC_LOGGER \
link->ctx->logger
-void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on)
+void dpcd_write_rx_power_ctrl(struct dc_link *link, bool on)
{
uint8_t state;
@@ -64,7 +64,7 @@ void dp_enable_link_phy(
link->cur_link_settings = *link_settings;
link->dc->hwss.enable_dp_link_output(link, link_res, signal,
clock_source, link_settings);
- dc_link_dp_receiver_power_ctrl(link, true);
+ dpcd_write_rx_power_ctrl(link, true);
}
void dp_disable_link_phy(struct dc_link *link,
@@ -74,7 +74,7 @@ void dp_disable_link_phy(struct dc_link *link,
struct dc *dc = link->ctx->dc;
if (!link->wa_flags.dp_keep_receiver_powered)
- dc_link_dp_receiver_power_ctrl(link, false);
+ dpcd_write_rx_power_ctrl(link, false);
dc->hwss.disable_link_output(link, link_res, signal);
/* Clear current link setting.*/
@@ -143,7 +143,7 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource
link_enc = link_enc_cfg_get_link_enc(link);
ASSERT(link_enc);
- if (!dc_link_should_enable_fec(link))
+ if (!dp_should_enable_fec(link))
return status;
if (link_enc->funcs->fec_set_ready &&
@@ -183,7 +183,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
link_enc = link_enc_cfg_get_link_enc(link);
ASSERT(link_enc);
- if (!dc_link_should_enable_fec(link))
+ if (!dp_should_enable_fec(link))
return;
if (link_enc->funcs->fec_set_enable &&
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
index dba1f29df319..1eb0619d6710 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
@@ -51,6 +51,9 @@ void dp_set_drive_settings(
enum dc_status dp_set_fec_ready(struct dc_link *link,
const struct link_resource *link_res, bool ready);
+
void dp_set_fec_enable(struct dc_link *link, bool enable);
+void dpcd_write_rx_power_ctrl(struct dc_link *link, bool on);
+
#endif /* __DC_LINK_DP_PHY_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index b48d4d822991..a9025671ee4a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -41,6 +41,8 @@
#include "link_dp_phy.h"
#include "link_dp_capability.h"
#include "link_edp_panel_control.h"
+#include "link/link_detection.h"
+#include "link/link_validation.h"
#include "atomfirmware.h"
#include "link_enc_cfg.h"
#include "resource.h"
@@ -258,10 +260,7 @@ void dp_wait_for_training_aux_rd_interval(
struct dc_link *link,
uint32_t wait_in_micro_secs)
{
- if (wait_in_micro_secs > 1000)
- msleep(wait_in_micro_secs/1000);
- else
- udelay(wait_in_micro_secs);
+ fsleep(wait_in_micro_secs);
DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n",
__func__,
@@ -725,12 +724,10 @@ void override_training_settings(
if (link->preferred_training_settings.fec_enable != NULL)
lt_settings->should_set_fec_ready = *link->preferred_training_settings.fec_enable;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Check DP tunnel LTTPR mode debug option. */
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dc->debug.dpia_debug.bits.force_non_lttpr)
lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR;
-#endif
dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode);
}
@@ -780,7 +777,7 @@ enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
return pattern;
}
-enum lttpr_mode dc_link_decide_lttpr_mode(struct dc_link *link,
+enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link,
struct dc_link_settings *link_setting)
{
enum dp_link_encoding encoding = link_dp_get_encoding_format(link_setting);
@@ -865,8 +862,9 @@ static enum dc_status configure_lttpr_mode_non_transparent(
uint8_t repeater_id;
enum dc_status result = DC_ERROR_UNEXPECTED;
uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
+ const struct dc *dc = link->dc;
- enum dp_link_encoding encoding = link_dp_get_encoding_format(&lt_settings->link_settings);
+ enum dp_link_encoding encoding = dc->link_srv->dp_get_encoding_format(&lt_settings->link_settings);
if (encoding == DP_8b_10b_ENCODING) {
DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__);
@@ -970,7 +968,7 @@ static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding
if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
(sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
break;
- udelay(1000);
+ fsleep(1000);
}
}
}
@@ -1573,7 +1571,6 @@ bool perform_link_training_with_retries(
msleep(delay_dp_power_up_in_ms);
}
-#ifdef CONFIG_DRM_AMD_DC_HDCP
if (panel_mode == DP_PANEL_MODE_EDP) {
struct cp_psp *cp_psp = &stream->ctx->cp_psp;
@@ -1587,17 +1584,16 @@ bool perform_link_training_with_retries(
result = cp_psp->funcs.enable_assr(cp_psp->handle, link);
}
}
-#endif
dp_set_panel_mode(link, panel_mode);
if (link->aux_access_disabled) {
- dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings);
+ dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings);
return true;
} else {
/** @todo Consolidate USB4 DP and DPx.x training. */
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- status = dc_link_dpia_perform_link_training(
+ status = dpia_perform_link_training(
link,
&pipe_ctx->link_res,
&cur_link_settings,
@@ -1649,7 +1645,7 @@ bool perform_link_training_with_retries(
if (status == LINK_TRAINING_ABORT) {
enum dc_connection_type type = dc_connection_none;
- dc_link_detect_connection_type(link, &type);
+ link_detect_connection_type(link, &type);
if (type == dc_connection_none) {
DC_LOG_HW_LINK_TRAINING("%s: Aborting training because sink unplugged\n", __func__);
break;
@@ -1682,7 +1678,7 @@ bool perform_link_training_with_retries(
* minimum link bandwidth.
*/
req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
- link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings);
+ link_bw = dp_link_bandwidth_kbps(link, &cur_link_settings);
is_link_bw_low = (req_bw > link_bw);
is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
(cur_link_settings.lane_count <= LANE_COUNT_ONE));
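The retry loop above falls back to lower settings when the requested stream bandwidth exceeds what the trained link can carry. As a rough illustration of the 8b/10b arithmetic behind a helper like dp_link_bandwidth_kbps() (FEC and 128b/132b overhead deliberately ignored; the function name below is illustrative only):
/*
 * Rough 8b/10b estimate of DP link bandwidth in kbps, for illustration.
 * dc link_rate values encode multiples of 0.27 Gbps (e.g. LINK_RATE_HIGH =
 * 0x0A is 2.7 Gbps); 8b/10b coding leaves 8 data bits per 10-bit symbol.
 */
static uint32_t example_link_bw_kbps_8b10b(enum dc_link_rate link_rate,
					    enum dc_lane_count lane_count)
{
	const uint32_t symbol_clock_khz = link_rate * 27000; /* 0.27 GHz units */

	return symbol_clock_khz * 8 * lane_count; /* data kbps across all lanes */
}
/* e.g. HBR (0x0A) x 4 lanes -> 10 * 27000 * 8 * 4 = 8,640,000 kbps (~8.64 Gbps) */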
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
index a04948635369..7d027bac8255 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
@@ -119,6 +119,9 @@ enum dc_dp_training_pattern decide_cr_training_pattern(
enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
const struct dc_link_settings *link_settings);
+enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link,
+ struct dc_link_settings *link_setting);
+
void dp_get_lttpr_mode_override(struct dc_link *link,
enum lttpr_mode *override);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c
index e50ec5012559..4c6b886a9da8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c
@@ -30,7 +30,7 @@
#include "link_dp_phy.h"
#define DC_LOGGER \
link->ctx->logger
-bool dc_link_dp_perform_link_training_skip_aux(
+bool dp_perform_link_training_skip_aux(
struct dc_link *link,
const struct link_resource *link_res,
const struct dc_link_settings *link_setting)
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h
index 413999cd03c4..546387a5f32d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h
@@ -28,7 +28,7 @@
#define __DC_LINK_DP_TRAINING_AUXLESS_H__
#include "link_dp_training.h"
-bool dc_link_dp_perform_link_training_skip_aux(
+bool dp_perform_link_training_skip_aux(
struct dc_link *link,
const struct link_resource *link_res,
const struct dc_link_settings *link_setting);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
index e60da0532c53..ab4aafdb5e5c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
@@ -29,7 +29,6 @@
#include "link_dp_training_dpia.h"
#include "dc.h"
#include "inc/core_status.h"
-#include "dc_link.h"
#include "dpcd_defs.h"
#include "link_dp_dpia.h"
@@ -986,7 +985,7 @@ static void dpia_training_abort(
core_link_send_set_config(link, DPIA_SET_CFG_SET_LINK, data);
}
-enum link_training_result dc_link_dpia_perform_link_training(
+enum link_training_result dpia_perform_link_training(
struct dc_link *link,
const struct link_resource *link_res,
const struct dc_link_settings *link_setting,
@@ -999,7 +998,7 @@ enum link_training_result dc_link_dpia_perform_link_training(
struct dc_link_settings link_settings = *link_setting; // non-const copy to pass in
- lt_settings.lttpr_mode = dc_link_decide_lttpr_mode(link, &link_settings);
+ lt_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link_settings);
/* Configure link as prescribed in link_setting and set LTTPR mode. */
result = dpia_configure_link(link, link_res, link_setting, &lt_settings);
@@ -1035,7 +1034,7 @@ enum link_training_result dc_link_dpia_perform_link_training(
* falling back to lower bandwidth settings possible.
*/
if (result == LINK_TRAINING_SUCCESS) {
- msleep(5);
+ fsleep(5000);
if (!link->is_automated)
result = dp_check_link_loss_status(link, &lt_settings);
} else if (result == LINK_TRAINING_ABORT)
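Several hunks in this series replace msleep()/udelay() calls with fsleep(), which takes microseconds and picks an appropriate delay primitive for the requested length; that is why msleep(5) becomes fsleep(5000) and udelay(1000) becomes fsleep(1000). An approximate sketch of the selection logic in include/linux/delay.h (thresholds vary by kernel version):
/*
 * Approximate shape of fsleep(); not a verbatim copy of the kernel helper.
 */
static inline void fsleep_sketch(unsigned long usecs)
{
	if (usecs <= 10)
		udelay(usecs);			/* busy-wait for very short delays */
	else if (usecs <= 20000)
		usleep_range(usecs, 2 * usecs);	/* hrtimer-backed sleep */
	else
		msleep(DIV_ROUND_UP(usecs, 1000));	/* jiffies-based sleep */
}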
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h
index 0150f2916421..b39fb9faf1c2 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h
@@ -32,7 +32,7 @@
* DPIA equivalent of dc_link_dp_perform_link_training.
* Aborts link training upon detection of sink unplug.
*/
-enum link_training_result dc_link_dpia_perform_link_training(
+enum link_training_result dpia_perform_link_training(
struct dc_link *link,
const struct link_resource *link_res,
const struct dc_link_settings *link_setting,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 97e02b5b21ae..93a6bbe954bb 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -139,7 +139,7 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
return DP_PANEL_MODE_DEFAULT;
}
-bool dc_link_set_backlight_level_nits(struct dc_link *link,
+bool edp_set_backlight_level_nits(struct dc_link *link,
bool isHDR,
uint32_t backlight_millinits,
uint32_t transition_time_in_ms)
@@ -171,7 +171,7 @@ bool dc_link_set_backlight_level_nits(struct dc_link *link,
return true;
}
-bool dc_link_get_backlight_level_nits(struct dc_link *link,
+bool edp_get_backlight_level_nits(struct dc_link *link,
uint32_t *backlight_millinits_avg,
uint32_t *backlight_millinits_peak)
{
@@ -201,7 +201,7 @@ bool dc_link_get_backlight_level_nits(struct dc_link *link,
return true;
}
-bool link_backlight_enable_aux(struct dc_link *link, bool enable)
+bool edp_backlight_enable_aux(struct dc_link *link, bool enable)
{
uint8_t backlight_enable = enable ? 1 : 0;
@@ -243,13 +243,13 @@ bool set_default_brightness_aux(struct dc_link *link)
if (default_backlight < 5000 || default_backlight > 5000000)
default_backlight = 150000; //
- return dc_link_set_backlight_level_nits(link, true,
+ return edp_set_backlight_level_nits(link, true,
default_backlight, 0);
}
return false;
}
-bool link_is_edp_ilr_optimization_required(struct dc_link *link,
+bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing)
{
struct dc_link_settings link_setting;
@@ -285,7 +285,7 @@ bool link_is_edp_ilr_optimization_required(struct dc_link *link,
req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing);
if (!crtc_timing->flags.DSC)
- dc_link_decide_edp_link_settings(link, &link_setting, req_bw);
+ edp_decide_link_settings(link, &link_setting, req_bw);
else
decide_edp_link_settings_with_dsc(link, &link_setting, req_bw, LINK_RATE_UNKNOWN);
@@ -299,7 +299,7 @@ bool link_is_edp_ilr_optimization_required(struct dc_link *link,
return false;
}
-void dc_link_edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd)
+void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd)
{
if (link->connector_signal != SIGNAL_TYPE_EDP)
return;
@@ -311,7 +311,7 @@ void dc_link_edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hp
link->dc->hwss.edp_backlight_control(link, true);
}
-bool dc_link_wait_for_t12(struct dc_link *link)
+bool edp_wait_for_t12(struct dc_link *link)
{
if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_wait_for_T12) {
link->dc->hwss.edp_wait_for_T12(link);
@@ -322,13 +322,13 @@ bool dc_link_wait_for_t12(struct dc_link *link)
return false;
}
-void link_edp_add_delay_for_T9(struct dc_link *link)
+void edp_add_delay_for_T9(struct dc_link *link)
{
if (link && link->panel_config.pps.extra_delay_backlight_off > 0)
- udelay(link->panel_config.pps.extra_delay_backlight_off * 1000);
+ fsleep(link->panel_config.pps.extra_delay_backlight_off * 1000);
}
-bool link_edp_receiver_ready_T9(struct dc_link *link)
+bool edp_receiver_ready_T9(struct dc_link *link)
{
unsigned int tries = 0;
unsigned char sinkstatus = 0;
@@ -353,7 +353,7 @@ bool link_edp_receiver_ready_T9(struct dc_link *link)
return result;
}
-bool link_edp_receiver_ready_T7(struct dc_link *link)
+bool edp_receiver_ready_T7(struct dc_link *link)
{
unsigned char sinkstatus = 0;
unsigned char edpRev = 0;
@@ -383,12 +383,12 @@ bool link_edp_receiver_ready_T7(struct dc_link *link)
}
if (link && link->panel_config.pps.extra_t7_ms > 0)
- udelay(link->panel_config.pps.extra_t7_ms * 1000);
+ fsleep(link->panel_config.pps.extra_t7_ms * 1000);
return result;
}
-bool link_power_alpm_dpcd_enable(struct dc_link *link, bool enable)
+bool edp_power_alpm_dpcd_enable(struct dc_link *link, bool enable)
{
bool ret = false;
union dpcd_alpm_configuration alpm_config;
@@ -422,7 +422,7 @@ static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link)
return pipe_ctx;
}
-bool dc_link_set_backlight_level(const struct dc_link *link,
+bool edp_set_backlight_level(const struct dc_link *link,
uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp)
{
@@ -453,7 +453,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
return true;
}
-bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active,
+bool edp_set_psr_allow_active(struct dc_link *link, const bool *allow_active,
bool wait, bool force_static, const unsigned int *power_opts)
{
struct dc *dc = link->ctx->dc;
@@ -502,7 +502,7 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active
return true;
}
-bool dc_link_get_psr_state(const struct dc_link *link, enum dc_psr_state *state)
+bool edp_get_psr_state(const struct dc_link *link, enum dc_psr_state *state)
{
struct dc *dc = link->ctx->dc;
struct dmcu *dmcu = dc->res_pool->dmcu;
@@ -557,7 +557,7 @@ transmitter_to_phy_id(struct dc_link *link)
}
}
-bool dc_link_setup_psr(struct dc_link *link,
+bool edp_setup_psr(struct dc_link *link,
const struct dc_stream_state *stream, struct psr_config *psr_config,
struct psr_context *psr_context)
{
@@ -623,7 +623,7 @@ bool dc_link_setup_psr(struct dc_link *link,
sizeof(psr_configuration.raw));
if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
- link_power_alpm_dpcd_enable(link, true);
+ edp_power_alpm_dpcd_enable(link, true);
psr_context->su_granularity_required =
psr_config->su_granularity_required;
psr_context->su_y_granularity =
@@ -695,7 +695,6 @@ bool dc_link_setup_psr(struct dc_link *link,
psr_context->psr_level.u32all = 0;
/*skip power down the single pipe since it blocks the cstate*/
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (link->ctx->asic_id.chip_family >= FAMILY_RV) {
switch (link->ctx->asic_id.chip_family) {
case FAMILY_YELLOW_CARP:
@@ -709,10 +708,6 @@ bool dc_link_setup_psr(struct dc_link *link,
break;
}
}
-#else
- if (link->ctx->asic_id.chip_family >= FAMILY_RV)
- psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
-#endif
/* SMU will perform additional powerdown sequence.
* For unsupported ASICs, set psr_level flag to skip PSR
@@ -757,7 +752,7 @@ bool dc_link_setup_psr(struct dc_link *link,
}
-void link_get_psr_residency(const struct dc_link *link, uint32_t *residency)
+void edp_get_psr_residency(const struct dc_link *link, uint32_t *residency)
{
struct dc *dc = link->ctx->dc;
struct dmub_psr *psr = dc->res_pool->psr;
@@ -772,7 +767,7 @@ void link_get_psr_residency(const struct dc_link *link, uint32_t *residency)
else
*residency = 0;
}
-bool link_set_sink_vtotal_in_psr_active(const struct dc_link *link, uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su)
+bool edp_set_sink_vtotal_in_psr_active(const struct dc_link *link, uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su)
{
struct dc *dc = link->ctx->dc;
struct dmub_psr *psr = dc->res_pool->psr;
@@ -803,7 +798,7 @@ static struct abm *get_abm_from_stream_res(const struct dc_link *link)
return abm;
}
-int dc_link_get_backlight_level(const struct dc_link *link)
+int edp_get_backlight_level(const struct dc_link *link)
{
struct abm *abm = get_abm_from_stream_res(link);
struct panel_cntl *panel_cntl = link->panel_cntl;
@@ -822,7 +817,7 @@ int dc_link_get_backlight_level(const struct dc_link *link)
return DC_ERROR_UNEXPECTED;
}
-int dc_link_get_target_backlight_pwm(const struct dc_link *link)
+int edp_get_target_backlight_pwm(const struct dc_link *link)
{
struct abm *abm = get_abm_from_stream_res(link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index 7f91a564b089..28f552080558 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -30,4 +30,34 @@
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
bool set_default_brightness_aux(struct dc_link *link);
+void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd);
+int edp_get_backlight_level(const struct dc_link *link);
+bool edp_get_backlight_level_nits(struct dc_link *link,
+ uint32_t *backlight_millinits_avg,
+ uint32_t *backlight_millinits_peak);
+bool edp_set_backlight_level(const struct dc_link *link,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp);
+bool edp_set_backlight_level_nits(struct dc_link *link,
+ bool isHDR,
+ uint32_t backlight_millinits,
+ uint32_t transition_time_in_ms);
+int edp_get_target_backlight_pwm(const struct dc_link *link);
+bool edp_get_psr_state(const struct dc_link *link, enum dc_psr_state *state);
+bool edp_set_psr_allow_active(struct dc_link *link, const bool *allow_active,
+ bool wait, bool force_static, const unsigned int *power_opts);
+bool edp_setup_psr(struct dc_link *link,
+ const struct dc_stream_state *stream, struct psr_config *psr_config,
+ struct psr_context *psr_context);
+bool edp_set_sink_vtotal_in_psr_active(const struct dc_link *link,
+ uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su);
+void edp_get_psr_residency(const struct dc_link *link, uint32_t *residency);
+bool edp_wait_for_t12(struct dc_link *link);
+bool edp_is_ilr_optimization_required(struct dc_link *link,
+ struct dc_crtc_timing *crtc_timing);
+bool edp_backlight_enable_aux(struct dc_link *link, bool enable);
+void edp_add_delay_for_T9(struct dc_link *link);
+bool edp_receiver_ready_T9(struct dc_link *link);
+bool edp_receiver_ready_T7(struct dc_link *link);
+bool edp_power_alpm_dpcd_enable(struct dc_link *link, bool enable);
#endif /* __DC_LINK_EDP_POWER_CONTROL_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c
index 5f39dfe06e9a..e3d729ab5b9f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c
@@ -33,18 +33,18 @@
#include "link_hpd.h"
#include "gpio_service_interface.h"
-bool dc_link_get_hpd_state(struct dc_link *dc_link)
+bool link_get_hpd_state(struct dc_link *link)
{
uint32_t state;
- dal_gpio_lock_pin(dc_link->hpd_gpio);
- dal_gpio_get_value(dc_link->hpd_gpio, &state);
- dal_gpio_unlock_pin(dc_link->hpd_gpio);
+ dal_gpio_lock_pin(link->hpd_gpio);
+ dal_gpio_get_value(link->hpd_gpio, &state);
+ dal_gpio_unlock_pin(link->hpd_gpio);
return state;
}
-void dc_link_enable_hpd(const struct dc_link *link)
+void link_enable_hpd(const struct dc_link *link)
{
struct link_encoder *encoder = link->link_enc;
@@ -52,7 +52,7 @@ void dc_link_enable_hpd(const struct dc_link *link)
encoder->funcs->enable_hpd(encoder);
}
-void dc_link_disable_hpd(const struct dc_link *link)
+void link_disable_hpd(const struct dc_link *link)
{
struct link_encoder *encoder = link->link_enc;
@@ -60,7 +60,7 @@ void dc_link_disable_hpd(const struct dc_link *link)
encoder->funcs->disable_hpd(encoder);
}
-void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
+void link_enable_hpd_filter(struct dc_link *link, bool enable)
{
struct gpio *hpd;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h
index 3d122def0c88..4fb526b264f9 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h
@@ -44,4 +44,11 @@ bool program_hpd_filter(const struct dc_link *link);
*/
bool dpia_query_hpd_status(struct dc_link *link);
bool query_hpd_status(struct dc_link *link, uint32_t *is_hpd_high);
+bool link_get_hpd_state(struct dc_link *link);
+struct gpio *link_get_hpd_gpio(struct dc_bios *dcb,
+ struct graphics_object_id link_id,
+ struct gpio_service *gpio_service);
+void link_enable_hpd(const struct dc_link *link);
+void link_disable_hpd(const struct dc_link *link);
+void link_enable_hpd_filter(struct dc_link *link, bool enable);
#endif /* __DC_LINK_HPD_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index 6b88ae14f1f9..aad8095660c9 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -53,11 +53,11 @@
#define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__)
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
#include "amdgpu_dm/dc_fpu.h"
#define DC_FP_START() dc_fpu_begin(__func__, __LINE__)
#define DC_FP_END() dc_fpu_end(__func__, __LINE__)
-#endif
+#endif /* CONFIG_DRM_AMD_DC_FP */
/*
*
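With the guard switched to CONFIG_DRM_AMD_DC_FP, DC_FP_START()/DC_FP_END() keep bracketing any floating-point math in DC so dc_fpu_begin()/dc_fpu_end() can save and restore the kernel FPU state. A hypothetical caller, for illustration only:
/* Hypothetical example: FP math in DC must be bracketed by these macros,
 * since kernel code cannot use the FPU freely.
 */
static void example_calculate_watermarks(void)
{
	DC_FP_START();
	/* ... double/float arithmetic, e.g. DML bandwidth formulas ... */
	DC_FP_END();
}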
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 007d6bdc3e39..3175a4fe4d52 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -410,8 +410,8 @@ union dmub_fw_boot_options {
uint32_t usb4_cm_version: 1; /**< 1 CM support */
uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */
uint32_t usb4_dpia_bw_alloc_supported: 1; /* 1 if USB4 dpia BW allocation supported */
-
- uint32_t reserved : 15; /**< reserved */
+ uint32_t disable_clk_ds: 1; /* 1 if disallow dispclk_ds and dppclk_ds*/
+ uint32_t reserved : 14; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
};
@@ -1971,7 +1971,7 @@ struct dmub_cmd_psr_copy_settings_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -2029,7 +2029,7 @@ struct dmub_cmd_psr_set_level_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -2056,7 +2056,7 @@ struct dmub_rb_cmd_psr_enable_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -2100,7 +2100,7 @@ struct dmub_cmd_psr_set_version_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -2131,7 +2131,7 @@ struct dmub_cmd_psr_force_static_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -2206,7 +2206,7 @@ struct dmub_cmd_update_dirty_rect_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -2344,7 +2344,7 @@ struct dmub_cmd_update_cursor_payload0 {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -2391,7 +2391,7 @@ struct dmub_cmd_psr_set_vtotal_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -2429,7 +2429,7 @@ struct dmub_cmd_psr_set_power_opt_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
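The boot-options union above gains a disable_clk_ds flag, so the reserved field shrinks from 15 to 14 bits and the bitfield still overlays the 32-bit "all" word exactly. A small sketch of packing the flags through that union (the function name is illustrative):
/* Illustration only: the bitfield members and the 32-bit "all" word alias
 * the same storage, so adding a flag must take a bit from "reserved".
 */
static uint32_t example_pack_boot_options(void)
{
	union dmub_fw_boot_options opts = { .all = 0 };

	opts.bits.disable_clk_ds = 1;	/* disallow dispclk/dppclk deep sleep */
	return opts.all;		/* packed word handed to DMUB firmware */
}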
diff --git a/drivers/gpu/drm/amd/display/include/hdcp_types.h b/drivers/gpu/drm/amd/display/include/hdcp_msg_types.h
index 42229b4effdc..42229b4effdc 100644
--- a/drivers/gpu/drm/amd/display/include/hdcp_types.h
+++ b/drivers/gpu/drm/amd/display/include/hdcp_msg_types.h
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 18b9173d5a96..cd870af5fd25 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -34,10 +34,6 @@
struct ddc;
struct irq_manager;
-enum {
- MAX_CONTROLLER_NUM = 6
-};
-
enum dp_power_state {
DP_POWER_STATE_D0 = 1,
DP_POWER_STATE_D3
@@ -60,28 +56,6 @@ enum {
DATA_EFFICIENCY_128b_132b_x10000 = 9646, /* 96.71% data efficiency x 99.75% downspread factor */
};
-enum link_training_result {
- LINK_TRAINING_SUCCESS,
- LINK_TRAINING_CR_FAIL_LANE0,
- LINK_TRAINING_CR_FAIL_LANE1,
- LINK_TRAINING_CR_FAIL_LANE23,
- /* CR DONE bit is cleared during EQ step */
- LINK_TRAINING_EQ_FAIL_CR,
- /* CR DONE bit is cleared but LANE0_CR_DONE is set during EQ step */
- LINK_TRAINING_EQ_FAIL_CR_PARTIAL,
- /* other failure during EQ step */
- LINK_TRAINING_EQ_FAIL_EQ,
- LINK_TRAINING_LQA_FAIL,
- /* one of the CR,EQ or symbol lock is dropped */
- LINK_TRAINING_LINK_LOSS,
- /* Abort link training (because sink unplugged) */
- LINK_TRAINING_ABORT,
- DP_128b_132b_LT_FAILED,
- DP_128b_132b_MAX_LOOP_COUNT_REACHED,
- DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT,
- DP_128b_132b_CDS_DONE_TIMEOUT,
-};
-
enum lttpr_mode {
LTTPR_MODE_UNKNOWN,
LTTPR_MODE_NON_LTTPR,
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 2be45b314922..315da61ee897 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -955,26 +955,20 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
* Check if Freesync is supported. Return if false. If true,
* set the corresponding bit in the info packet
*/
- bool freesync_on_desktop;
- bool fams_enable;
-
- fams_enable = stream->ctx->dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching;
- freesync_on_desktop = stream->freesync_on_desktop && fams_enable;
-
if (!vrr->send_info_frame)
return;
switch (packet_type) {
case PACKET_TYPE_FS_V3:
- build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket, freesync_on_desktop);
+ build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket, stream->freesync_on_desktop);
break;
case PACKET_TYPE_FS_V2:
- build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket, freesync_on_desktop);
+ build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket, stream->freesync_on_desktop);
break;
case PACKET_TYPE_VRR:
case PACKET_TYPE_FS_V1:
default:
- build_vrr_infopacket_v1(stream->signal, vrr, infopacket, freesync_on_desktop);
+ build_vrr_infopacket_v1(stream->signal, vrr, infopacket, stream->freesync_on_desktop);
}
if (true == pack_sdp_v1_3 &&
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
index eb6f9b9c504a..c62df3bcc7cb 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -26,13 +26,11 @@
#ifndef MOD_HDCP_LOG_H_
#define MOD_HDCP_LOG_H_
-#ifdef CONFIG_DRM_AMD_DC_HDCP
#define HDCP_LOG_ERR(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__)
#define HDCP_LOG_DDC(hdcp, ...) pr_debug("[HDCP_DDC]:"__VA_ARGS__)
-#endif
/* default logs */
#define HDCP_ERROR_TRACE(hdcp, status) \
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index 3348bb97ef81..a4d344a4db9e 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -104,6 +104,7 @@ struct mod_hdcp_displayport {
uint8_t rev;
uint8_t assr_enabled;
uint8_t mst_enabled;
+ uint8_t dp2_enabled;
uint8_t usb4_enabled;
};
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index e39b133d05af..fa469de3e935 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -678,13 +678,8 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
bool result = false;
uint32_t i, j = 0;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (res_pool->abm == NULL && res_pool->multiple_abms[inst] == NULL)
return false;
-#else
- if (res_pool->abm == NULL)
- return false;
-#endif
memset(&ram_table, 0, sizeof(ram_table));
memset(&config, 0, sizeof(config));
@@ -737,12 +732,10 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
config.min_abm_backlight = ram_table.min_abm_backlight;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (res_pool->multiple_abms[inst]) {
result = res_pool->multiple_abms[inst]->funcs->init_abm_config(
res_pool->multiple_abms[inst], (char *)(&config), sizeof(struct abm_config_table), inst);
} else
-#endif
result = res_pool->abm->funcs->init_abm_config(
res_pool->abm, (char *)(&config), sizeof(struct abm_config_table), 0);
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
index 18d34bbceebe..79c41004c0b6 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
@@ -4868,6 +4868,10 @@
#define mmCP_ME2_PIPE2_INT_STATUS_BASE_IDX 0
#define mmCP_ME2_PIPE3_INT_STATUS 0x1e34
#define mmCP_ME2_PIPE3_INT_STATUS_BASE_IDX 0
+#define mmCP_ME1_INT_STAT_DEBUG 0x1e35
+#define mmCP_ME1_INT_STAT_DEBUG_BASE_IDX 0
+#define mmCP_ME2_INT_STAT_DEBUG 0x1e36
+#define mmCP_ME2_INT_STAT_DEBUG_BASE_IDX 0
#define mmCP_GFX_QUEUE_INDEX 0x1e37
#define mmCP_GFX_QUEUE_INDEX_BASE_IDX 0
#define mmCC_GC_EDC_CONFIG 0x1e38
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
index 4127896ffcdf..52043e143067 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
@@ -18680,6 +18680,60 @@
//CC_GC_EDC_CONFIG
#define CC_GC_EDC_CONFIG__DIS_EDC__SHIFT 0x1
#define CC_GC_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+//CP_ME1_INT_STAT_DEBUG
+#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT 0xc
+#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT 0xd
+#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK 0x00001000L
+#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK 0x00002000L
+#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
+//CP_ME2_INT_STAT_DEBUG
+#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT 0xc
+#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT 0xd
+#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK 0x00001000L
+#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK 0x00002000L
+#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
//CP_ME1_PIPE_PRIORITY_CNTS
#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
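The CP_ME*_INT_STAT_DEBUG additions follow the usual __SHIFT/_MASK convention of these generated headers. Decoding a field from a register value is a mask-and-shift, which amdgpu's REG_GET_FIELD() macro expands to; shown here in open-coded form with an illustrative function name:
/* Example decode using the new definitions: extract the GENERIC0 interrupt
 * status bit from a CP_ME1_INT_STAT_DEBUG value previously read from hardware.
 */
static bool example_me1_generic0_asserted(uint32_t stat_debug)
{
	return (stat_debug & CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK)
			>> CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT;
}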
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
index 3973110f149c..a734abaa91a5 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
@@ -4531,6 +4531,10 @@
#define mmCP_GFX_QUEUE_INDEX_BASE_IDX 0
#define mmCC_GC_EDC_CONFIG 0x1e38
#define mmCC_GC_EDC_CONFIG_BASE_IDX 0
+#define mmCP_ME1_INT_STAT_DEBUG 0x1e35
+#define mmCP_ME1_INT_STAT_DEBUG_BASE_IDX 0
+#define mmCP_ME2_INT_STAT_DEBUG 0x1e36
+#define mmCP_ME2_INT_STAT_DEBUG_BASE_IDX 0
#define mmCP_ME1_PIPE_PRIORITY_CNTS 0x1e39
#define mmCP_ME1_PIPE_PRIORITY_CNTS_BASE_IDX 0
#define mmCP_ME1_PIPE0_PRIORITY 0x1e3a
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
index d4e8ff22ecb8..d7a17bae2584 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
@@ -17028,6 +17028,60 @@
//CC_GC_EDC_CONFIG
#define CC_GC_EDC_CONFIG__DIS_EDC__SHIFT 0x1
#define CC_GC_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+//CP_ME1_INT_STAT_DEBUG
+#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT 0xc
+#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT 0xd
+#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK 0x00001000L
+#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK 0x00002000L
+#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
+//CP_ME2_INT_STAT_DEBUG
+#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT 0xc
+#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT 0xd
+#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK 0x00001000L
+#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK 0x00002000L
+#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
//CP_ME1_PIPE_PRIORITY_CNTS
#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
diff --git a/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_offset.h
new file mode 100644
index 000000000000..546b043ccdf5
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_offset.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _hdp_4_4_2_OFFSET_HEADER
+#define _hdp_4_4_2_OFFSET_HEADER
+
+
+
+// addressBlock: aid_hdp_hdpdec
+// base address: 0x3c80
+#define regHDP_MMHUB_TLVL 0x0000
+#define regHDP_MMHUB_TLVL_BASE_IDX 0
+#define regHDP_MMHUB_UNITID 0x0001
+#define regHDP_MMHUB_UNITID_BASE_IDX 0
+#define regHDP_NONSURFACE_BASE 0x0040
+#define regHDP_NONSURFACE_BASE_BASE_IDX 0
+#define regHDP_NONSURFACE_INFO 0x0041
+#define regHDP_NONSURFACE_INFO_BASE_IDX 0
+#define regHDP_NONSURFACE_BASE_HI 0x0042
+#define regHDP_NONSURFACE_BASE_HI_BASE_IDX 0
+#define regHDP_SURFACE_WRITE_FLAGS 0x00c4
+#define regHDP_SURFACE_WRITE_FLAGS_BASE_IDX 0
+#define regHDP_SURFACE_READ_FLAGS 0x00c5
+#define regHDP_SURFACE_READ_FLAGS_BASE_IDX 0
+#define regHDP_SURFACE_WRITE_FLAGS_CLR 0x00c6
+#define regHDP_SURFACE_WRITE_FLAGS_CLR_BASE_IDX 0
+#define regHDP_SURFACE_READ_FLAGS_CLR 0x00c7
+#define regHDP_SURFACE_READ_FLAGS_CLR_BASE_IDX 0
+#define regHDP_NONSURF_FLAGS 0x00c8
+#define regHDP_NONSURF_FLAGS_BASE_IDX 0
+#define regHDP_NONSURF_FLAGS_CLR 0x00c9
+#define regHDP_NONSURF_FLAGS_CLR_BASE_IDX 0
+#define regHDP_HOST_PATH_CNTL 0x00cc
+#define regHDP_HOST_PATH_CNTL_BASE_IDX 0
+#define regHDP_SW_SEMAPHORE 0x00cd
+#define regHDP_SW_SEMAPHORE_BASE_IDX 0
+#define regHDP_DEBUG0 0x00ce
+#define regHDP_DEBUG0_BASE_IDX 0
+#define regHDP_LAST_SURFACE_HIT 0x00d0
+#define regHDP_LAST_SURFACE_HIT_BASE_IDX 0
+#define regHDP_OUTSTANDING_REQ 0x00d2
+#define regHDP_OUTSTANDING_REQ_BASE_IDX 0
+#define regHDP_MISC_CNTL 0x00d3
+#define regHDP_MISC_CNTL_BASE_IDX 0
+#define regHDP_MEM_POWER_CTRL 0x00d4
+#define regHDP_MEM_POWER_CTRL_BASE_IDX 0
+#define regHDP_MMHUB_CNTL 0x00d5
+#define regHDP_MMHUB_CNTL_BASE_IDX 0
+#define regHDP_EDC_CNT 0x00d6
+#define regHDP_EDC_CNT_BASE_IDX 0
+#define regHDP_VERSION 0x00d7
+#define regHDP_VERSION_BASE_IDX 0
+#define regHDP_CLK_CNTL 0x00d8
+#define regHDP_CLK_CNTL_BASE_IDX 0
+#define regHDP_MEMIO_CNTL 0x00f6
+#define regHDP_MEMIO_CNTL_BASE_IDX 0
+#define regHDP_MEMIO_ADDR 0x00f7
+#define regHDP_MEMIO_ADDR_BASE_IDX 0
+#define regHDP_MEMIO_STATUS 0x00f8
+#define regHDP_MEMIO_STATUS_BASE_IDX 0
+#define regHDP_MEMIO_WR_DATA 0x00f9
+#define regHDP_MEMIO_WR_DATA_BASE_IDX 0
+#define regHDP_MEMIO_RD_DATA 0x00fa
+#define regHDP_MEMIO_RD_DATA_BASE_IDX 0
+#define regHDP_XDP_DIRECT2HDP_FIRST 0x0100
+#define regHDP_XDP_DIRECT2HDP_FIRST_BASE_IDX 0
+#define regHDP_XDP_D2H_FLUSH 0x0101
+#define regHDP_XDP_D2H_FLUSH_BASE_IDX 0
+#define regHDP_XDP_D2H_BAR_UPDATE 0x0102
+#define regHDP_XDP_D2H_BAR_UPDATE_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_3 0x0103
+#define regHDP_XDP_D2H_RSVD_3_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_4 0x0104
+#define regHDP_XDP_D2H_RSVD_4_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_5 0x0105
+#define regHDP_XDP_D2H_RSVD_5_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_6 0x0106
+#define regHDP_XDP_D2H_RSVD_6_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_7 0x0107
+#define regHDP_XDP_D2H_RSVD_7_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_8 0x0108
+#define regHDP_XDP_D2H_RSVD_8_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_9 0x0109
+#define regHDP_XDP_D2H_RSVD_9_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_10 0x010a
+#define regHDP_XDP_D2H_RSVD_10_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_11 0x010b
+#define regHDP_XDP_D2H_RSVD_11_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_12 0x010c
+#define regHDP_XDP_D2H_RSVD_12_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_13 0x010d
+#define regHDP_XDP_D2H_RSVD_13_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_14 0x010e
+#define regHDP_XDP_D2H_RSVD_14_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_15 0x010f
+#define regHDP_XDP_D2H_RSVD_15_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_16 0x0110
+#define regHDP_XDP_D2H_RSVD_16_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_17 0x0111
+#define regHDP_XDP_D2H_RSVD_17_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_18 0x0112
+#define regHDP_XDP_D2H_RSVD_18_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_19 0x0113
+#define regHDP_XDP_D2H_RSVD_19_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_20 0x0114
+#define regHDP_XDP_D2H_RSVD_20_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_21 0x0115
+#define regHDP_XDP_D2H_RSVD_21_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_22 0x0116
+#define regHDP_XDP_D2H_RSVD_22_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_23 0x0117
+#define regHDP_XDP_D2H_RSVD_23_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_24 0x0118
+#define regHDP_XDP_D2H_RSVD_24_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_25 0x0119
+#define regHDP_XDP_D2H_RSVD_25_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_26 0x011a
+#define regHDP_XDP_D2H_RSVD_26_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_27 0x011b
+#define regHDP_XDP_D2H_RSVD_27_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_28 0x011c
+#define regHDP_XDP_D2H_RSVD_28_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_29 0x011d
+#define regHDP_XDP_D2H_RSVD_29_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_30 0x011e
+#define regHDP_XDP_D2H_RSVD_30_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_31 0x011f
+#define regHDP_XDP_D2H_RSVD_31_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_32 0x0120
+#define regHDP_XDP_D2H_RSVD_32_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_33 0x0121
+#define regHDP_XDP_D2H_RSVD_33_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_34 0x0122
+#define regHDP_XDP_D2H_RSVD_34_BASE_IDX 0
+#define regHDP_XDP_DIRECT2HDP_LAST 0x0123
+#define regHDP_XDP_DIRECT2HDP_LAST_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR_CFG 0x0124
+#define regHDP_XDP_P2P_BAR_CFG_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_OFFSET 0x0125
+#define regHDP_XDP_P2P_MBX_OFFSET_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR0 0x0126
+#define regHDP_XDP_P2P_MBX_ADDR0_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR1 0x0127
+#define regHDP_XDP_P2P_MBX_ADDR1_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR2 0x0128
+#define regHDP_XDP_P2P_MBX_ADDR2_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR3 0x0129
+#define regHDP_XDP_P2P_MBX_ADDR3_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR4 0x012a
+#define regHDP_XDP_P2P_MBX_ADDR4_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR5 0x012b
+#define regHDP_XDP_P2P_MBX_ADDR5_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR6 0x012c
+#define regHDP_XDP_P2P_MBX_ADDR6_BASE_IDX 0
+#define regHDP_XDP_HDP_MBX_MC_CFG 0x012d
+#define regHDP_XDP_HDP_MBX_MC_CFG_BASE_IDX 0
+#define regHDP_XDP_HDP_MC_CFG 0x012e
+#define regHDP_XDP_HDP_MC_CFG_BASE_IDX 0
+#define regHDP_XDP_HST_CFG 0x012f
+#define regHDP_XDP_HST_CFG_BASE_IDX 0
+#define regHDP_XDP_HDP_IPH_CFG 0x0131
+#define regHDP_XDP_HDP_IPH_CFG_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR0 0x0134
+#define regHDP_XDP_P2P_BAR0_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR1 0x0135
+#define regHDP_XDP_P2P_BAR1_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR2 0x0136
+#define regHDP_XDP_P2P_BAR2_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR3 0x0137
+#define regHDP_XDP_P2P_BAR3_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR4 0x0138
+#define regHDP_XDP_P2P_BAR4_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR5 0x0139
+#define regHDP_XDP_P2P_BAR5_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR6 0x013a
+#define regHDP_XDP_P2P_BAR6_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR7 0x013b
+#define regHDP_XDP_P2P_BAR7_BASE_IDX 0
+#define regHDP_XDP_FLUSH_ARMED_STS 0x013c
+#define regHDP_XDP_FLUSH_ARMED_STS_BASE_IDX 0
+#define regHDP_XDP_FLUSH_CNTR0_STS 0x013d
+#define regHDP_XDP_FLUSH_CNTR0_STS_BASE_IDX 0
+#define regHDP_XDP_BUSY_STS 0x013e
+#define regHDP_XDP_BUSY_STS_BASE_IDX 0
+#define regHDP_XDP_STICKY 0x013f
+#define regHDP_XDP_STICKY_BASE_IDX 0
+#define regHDP_XDP_CHKN 0x0140
+#define regHDP_XDP_CHKN_BASE_IDX 0
+#define regHDP_XDP_BARS_ADDR_39_36 0x0144
+#define regHDP_XDP_BARS_ADDR_39_36_BASE_IDX 0
+#define regHDP_XDP_MC_VM_FB_LOCATION_BASE 0x0145
+#define regHDP_XDP_MC_VM_FB_LOCATION_BASE_BASE_IDX 0
+#define regHDP_XDP_GPU_IOV_VIOLATION_LOG 0x0148
+#define regHDP_XDP_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define regHDP_XDP_GPU_IOV_VIOLATION_LOG2 0x0149
+#define regHDP_XDP_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0
+#define regHDP_XDP_MMHUB_ERROR 0x014a
+#define regHDP_XDP_MMHUB_ERROR_BASE_IDX 0
+
+#endif
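Each register in the new offset header is paired with a _BASE_IDX selecting the per-instance base address. A sketch of how such offsets are normally consumed, assuming the SOC15 helpers from soc15_common.h (the helper choice and function name are illustrative, not taken from this patch):
/* Sketch under the assumption that the usual SOC15 helpers are used:
 * the macro adds the base address chosen by the register's _BASE_IDX to
 * the register offset before performing the MMIO read.
 */
static uint32_t example_read_hdp_misc_cntl(struct amdgpu_device *adev)
{
	return RREG32_SOC15(HDP, 0, regHDP_MISC_CNTL);
}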
diff --git a/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_sh_mask.h
new file mode 100644
index 000000000000..3ccd2797936e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_sh_mask.h
@@ -0,0 +1,663 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _hdp_4_4_2_SH_MASK_HEADER
+#define _hdp_4_4_2_SH_MASK_HEADER
+
+
+// addressBlock: aid_hdp_hdpdec
+//HDP_MMHUB_TLVL
+#define HDP_MMHUB_TLVL__HDP_WR_TLVL__SHIFT 0x0
+#define HDP_MMHUB_TLVL__HDP_RD_TLVL__SHIFT 0x4
+#define HDP_MMHUB_TLVL__XDP_WR_TLVL__SHIFT 0x8
+#define HDP_MMHUB_TLVL__XDP_RD_TLVL__SHIFT 0xc
+#define HDP_MMHUB_TLVL__XDP_MBX_WR_TLVL__SHIFT 0x10
+#define HDP_MMHUB_TLVL__HDP_WR_TLVL_MASK 0x0000000FL
+#define HDP_MMHUB_TLVL__HDP_RD_TLVL_MASK 0x000000F0L
+#define HDP_MMHUB_TLVL__XDP_WR_TLVL_MASK 0x00000F00L
+#define HDP_MMHUB_TLVL__XDP_RD_TLVL_MASK 0x0000F000L
+#define HDP_MMHUB_TLVL__XDP_MBX_WR_TLVL_MASK 0x000F0000L
+//HDP_MMHUB_UNITID
+#define HDP_MMHUB_UNITID__HDP_UNITID__SHIFT 0x0
+#define HDP_MMHUB_UNITID__XDP_UNITID__SHIFT 0x8
+#define HDP_MMHUB_UNITID__XDP_MBX_UNITID__SHIFT 0x10
+#define HDP_MMHUB_UNITID__HDP_UNITID_MASK 0x0000003FL
+#define HDP_MMHUB_UNITID__XDP_UNITID_MASK 0x00003F00L
+#define HDP_MMHUB_UNITID__XDP_MBX_UNITID_MASK 0x003F0000L
+//HDP_NONSURFACE_BASE
+#define HDP_NONSURFACE_BASE__NONSURF_BASE_39_8__SHIFT 0x0
+#define HDP_NONSURFACE_BASE__NONSURF_BASE_39_8_MASK 0xFFFFFFFFL
+//HDP_NONSURFACE_INFO
+#define HDP_NONSURFACE_INFO__NONSURF_SWAP__SHIFT 0x4
+#define HDP_NONSURFACE_INFO__NONSURF_VMID__SHIFT 0x8
+#define HDP_NONSURFACE_INFO__NONSURF_SWAP_MASK 0x00000030L
+#define HDP_NONSURFACE_INFO__NONSURF_VMID_MASK 0x00000F00L
+//HDP_NONSURFACE_BASE_HI
+#define HDP_NONSURFACE_BASE_HI__NONSURF_BASE_47_40__SHIFT 0x0
+#define HDP_NONSURFACE_BASE_HI__NONSURF_BASE_47_40_MASK 0x000000FFL
+//HDP_SURFACE_WRITE_FLAGS
+#define HDP_SURFACE_WRITE_FLAGS__SURF0_WRITE_FLAG__SHIFT 0x0
+#define HDP_SURFACE_WRITE_FLAGS__SURF1_WRITE_FLAG__SHIFT 0x1
+#define HDP_SURFACE_WRITE_FLAGS__SURF0_WRITE_FLAG_MASK 0x00000001L
+#define HDP_SURFACE_WRITE_FLAGS__SURF1_WRITE_FLAG_MASK 0x00000002L
+//HDP_SURFACE_READ_FLAGS
+#define HDP_SURFACE_READ_FLAGS__SURF0_READ_FLAG__SHIFT 0x0
+#define HDP_SURFACE_READ_FLAGS__SURF1_READ_FLAG__SHIFT 0x1
+#define HDP_SURFACE_READ_FLAGS__SURF0_READ_FLAG_MASK 0x00000001L
+#define HDP_SURFACE_READ_FLAGS__SURF1_READ_FLAG_MASK 0x00000002L
+//HDP_SURFACE_WRITE_FLAGS_CLR
+#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF0_WRITE_FLAG_CLR__SHIFT 0x0
+#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF1_WRITE_FLAG_CLR__SHIFT 0x1
+#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF0_WRITE_FLAG_CLR_MASK 0x00000001L
+#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF1_WRITE_FLAG_CLR_MASK 0x00000002L
+//HDP_SURFACE_READ_FLAGS_CLR
+#define HDP_SURFACE_READ_FLAGS_CLR__SURF0_READ_FLAG_CLR__SHIFT 0x0
+#define HDP_SURFACE_READ_FLAGS_CLR__SURF1_READ_FLAG_CLR__SHIFT 0x1
+#define HDP_SURFACE_READ_FLAGS_CLR__SURF0_READ_FLAG_CLR_MASK 0x00000001L
+#define HDP_SURFACE_READ_FLAGS_CLR__SURF1_READ_FLAG_CLR_MASK 0x00000002L
+//HDP_NONSURF_FLAGS
+#define HDP_NONSURF_FLAGS__NONSURF_WRITE_FLAG__SHIFT 0x0
+#define HDP_NONSURF_FLAGS__NONSURF_READ_FLAG__SHIFT 0x1
+#define HDP_NONSURF_FLAGS__NONSURF_WRITE_FLAG_MASK 0x00000001L
+#define HDP_NONSURF_FLAGS__NONSURF_READ_FLAG_MASK 0x00000002L
+//HDP_NONSURF_FLAGS_CLR
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_WRITE_FLAG_CLR__SHIFT 0x0
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_READ_FLAG_CLR__SHIFT 0x1
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_WRITE_FLAG_CLR_MASK 0x00000001L
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_READ_FLAG_CLR_MASK 0x00000002L
+//HDP_HOST_PATH_CNTL
+#define HDP_HOST_PATH_CNTL__WR_STALL_TIMER__SHIFT 0x9
+#define HDP_HOST_PATH_CNTL__RD_STALL_TIMER__SHIFT 0xb
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_PRELOAD_CFG__SHIFT 0x12
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER__SHIFT 0x13
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_EN__SHIFT 0x15
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_64B_EN__SHIFT 0x16
+#define HDP_HOST_PATH_CNTL__ALL_SURFACES_DIS__SHIFT 0x1d
+#define HDP_HOST_PATH_CNTL__WR_STALL_TIMER_MASK 0x00000600L
+#define HDP_HOST_PATH_CNTL__RD_STALL_TIMER_MASK 0x00001800L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_PRELOAD_CFG_MASK 0x00040000L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_MASK 0x00180000L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_EN_MASK 0x00200000L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_64B_EN_MASK 0x00400000L
+#define HDP_HOST_PATH_CNTL__ALL_SURFACES_DIS_MASK 0x20000000L
+//HDP_SW_SEMAPHORE
+#define HDP_SW_SEMAPHORE__SW_SEMAPHORE__SHIFT 0x0
+#define HDP_SW_SEMAPHORE__SW_SEMAPHORE_MASK 0xFFFFFFFFL
+//HDP_DEBUG0
+#define HDP_DEBUG0__HDP_DEBUG__SHIFT 0x0
+#define HDP_DEBUG0__HDP_DEBUG_MASK 0xFFFFFFFFL
+//HDP_LAST_SURFACE_HIT
+#define HDP_LAST_SURFACE_HIT__LAST_SURFACE_HIT__SHIFT 0x0
+#define HDP_LAST_SURFACE_HIT__LAST_SURFACE_HIT_MASK 0x00000003L
+//HDP_OUTSTANDING_REQ
+#define HDP_OUTSTANDING_REQ__WRITE_REQ__SHIFT 0x0
+#define HDP_OUTSTANDING_REQ__READ_REQ__SHIFT 0x8
+#define HDP_OUTSTANDING_REQ__WRITE_REQ_MASK 0x000000FFL
+#define HDP_OUTSTANDING_REQ__READ_REQ_MASK 0x0000FF00L
+//HDP_MISC_CNTL
+#define HDP_MISC_CNTL__IDLE_HYSTERESIS_CNTL__SHIFT 0x2
+#define HDP_MISC_CNTL__ATOMIC_BUFFER_PROTECT_ENABLE__SHIFT 0x4
+#define HDP_MISC_CNTL__OUTSTANDING_WRITE_COUNT_1024__SHIFT 0x5
+#define HDP_MISC_CNTL__RAW_ADDR_CAM_ENABLE__SHIFT 0x7
+#define HDP_MISC_CNTL__MMHUB_EARLY_WRACK_ENABLE__SHIFT 0x8
+#define HDP_MISC_CNTL__EARLY_WRACK_MISSING_PROTECT_ENABLE__SHIFT 0x9
+#define HDP_MISC_CNTL__SIMULTANEOUS_READS_WRITES__SHIFT 0xb
+#define HDP_MISC_CNTL__SYSHUB_CHANNEL_PRIORITY__SHIFT 0xc
+#define HDP_MISC_CNTL__READ_BUFFER_WATERMARK__SHIFT 0xe
+#define HDP_MISC_CNTL__SRAM_ECC_ENABLE__SHIFT 0x14
+#define HDP_MISC_CNTL__FED_ENABLE__SHIFT 0x15
+#define HDP_MISC_CNTL__ATOMIC_FED_ENABLE__SHIFT 0x16
+#define HDP_MISC_CNTL__MMHUB_WRBURST_ENABLE__SHIFT 0x18
+#define HDP_MISC_CNTL__HDP_MMHUB_PENDING_WR_TAG_CHECK__SHIFT 0x1a
+#define HDP_MISC_CNTL__XDP_MMHUB_PENDING_WR_TAG_CHECK__SHIFT 0x1b
+#define HDP_MISC_CNTL__MMHUB_WRBURST_SIZE__SHIFT 0x1e
+#define HDP_MISC_CNTL__IDLE_HYSTERESIS_CNTL_MASK 0x0000000CL
+#define HDP_MISC_CNTL__ATOMIC_BUFFER_PROTECT_ENABLE_MASK 0x00000010L
+#define HDP_MISC_CNTL__OUTSTANDING_WRITE_COUNT_1024_MASK 0x00000020L
+#define HDP_MISC_CNTL__RAW_ADDR_CAM_ENABLE_MASK 0x00000080L
+#define HDP_MISC_CNTL__MMHUB_EARLY_WRACK_ENABLE_MASK 0x00000100L
+#define HDP_MISC_CNTL__EARLY_WRACK_MISSING_PROTECT_ENABLE_MASK 0x00000200L
+#define HDP_MISC_CNTL__SIMULTANEOUS_READS_WRITES_MASK 0x00000800L
+#define HDP_MISC_CNTL__SYSHUB_CHANNEL_PRIORITY_MASK 0x00003000L
+#define HDP_MISC_CNTL__READ_BUFFER_WATERMARK_MASK 0x0000C000L
+#define HDP_MISC_CNTL__SRAM_ECC_ENABLE_MASK 0x00100000L
+#define HDP_MISC_CNTL__FED_ENABLE_MASK 0x00200000L
+#define HDP_MISC_CNTL__ATOMIC_FED_ENABLE_MASK 0x00400000L
+#define HDP_MISC_CNTL__MMHUB_WRBURST_ENABLE_MASK 0x01000000L
+#define HDP_MISC_CNTL__HDP_MMHUB_PENDING_WR_TAG_CHECK_MASK 0x04000000L
+#define HDP_MISC_CNTL__XDP_MMHUB_PENDING_WR_TAG_CHECK_MASK 0x08000000L
+#define HDP_MISC_CNTL__MMHUB_WRBURST_SIZE_MASK 0x40000000L
+//HDP_MEM_POWER_CTRL
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN__SHIFT 0x0
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN__SHIFT 0x1
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN__SHIFT 0x2
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN__SHIFT 0x3
+#define HDP_MEM_POWER_CTRL__IPH_MEM_IDLE_HYSTERESIS__SHIFT 0x4
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DOWN_LS_ENTER_DELAY__SHIFT 0xe
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN__SHIFT 0x10
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN__SHIFT 0x11
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DS_EN__SHIFT 0x12
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_SD_EN__SHIFT 0x13
+#define HDP_MEM_POWER_CTRL__RC_MEM_IDLE_HYSTERESIS__SHIFT 0x14
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x18
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DOWN_LS_ENTER_DELAY__SHIFT 0x1e
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK 0x00000004L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK 0x00000008L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_IDLE_HYSTERESIS_MASK 0x00000070L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DOWN_LS_ENTER_DELAY_MASK 0x0000C000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DS_EN_MASK 0x00040000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_SD_EN_MASK 0x00080000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_IDLE_HYSTERESIS_MASK 0x00700000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_UP_RECOVER_DELAY_MASK 0x3F000000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DOWN_LS_ENTER_DELAY_MASK 0xC0000000L
+//HDP_MMHUB_CNTL
+#define HDP_MMHUB_CNTL__HDP_MMHUB_RO__SHIFT 0x0
+#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC__SHIFT 0x1
+#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP__SHIFT 0x2
+#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_OVERRIDE__SHIFT 0x4
+#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_OVERRIDE__SHIFT 0x5
+#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_OVERRIDE__SHIFT 0x6
+#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_MASK 0x00000001L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_MASK 0x00000002L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_MASK 0x00000004L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_OVERRIDE_MASK 0x00000010L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_OVERRIDE_MASK 0x00000020L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_OVERRIDE_MASK 0x00000040L
+//HDP_EDC_CNT
+#define HDP_EDC_CNT__MEM0_SED_COUNT__SHIFT 0x0
+#define HDP_EDC_CNT__MEM0_SED_COUNT_MASK 0x00000003L
+//HDP_VERSION
+#define HDP_VERSION__MINVER__SHIFT 0x0
+#define HDP_VERSION__MAJVER__SHIFT 0x8
+#define HDP_VERSION__REV__SHIFT 0x10
+#define HDP_VERSION__MINVER_MASK 0x000000FFL
+#define HDP_VERSION__MAJVER_MASK 0x0000FF00L
+#define HDP_VERSION__REV_MASK 0x00FF0000L
+//HDP_CLK_CNTL
+#define HDP_CLK_CNTL__REG_CLK_ENABLE_COUNT__SHIFT 0x0
+#define HDP_CLK_CNTL__REG_WAKE_DYN_CLK__SHIFT 0x4
+#define HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a
+#define HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1b
+#define HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE__SHIFT 0x1c
+#define HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1d
+#define HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE__SHIFT 0x1e
+#define HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
+#define HDP_CLK_CNTL__REG_CLK_ENABLE_COUNT_MASK 0x0000000FL
+#define HDP_CLK_CNTL__REG_WAKE_DYN_CLK_MASK 0x00000010L
+#define HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L
+#define HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK 0x08000000L
+#define HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK 0x10000000L
+#define HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK 0x20000000L
+#define HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK 0x40000000L
+#define HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK 0x80000000L
+//HDP_MEMIO_CNTL
+#define HDP_MEMIO_CNTL__MEMIO_SEND__SHIFT 0x0
+#define HDP_MEMIO_CNTL__MEMIO_OP__SHIFT 0x1
+#define HDP_MEMIO_CNTL__MEMIO_BE__SHIFT 0x2
+#define HDP_MEMIO_CNTL__MEMIO_WR_STROBE__SHIFT 0x6
+#define HDP_MEMIO_CNTL__MEMIO_RD_STROBE__SHIFT 0x7
+#define HDP_MEMIO_CNTL__MEMIO_ADDR_UPPER__SHIFT 0x8
+#define HDP_MEMIO_CNTL__MEMIO_CLR_WR_ERROR__SHIFT 0xe
+#define HDP_MEMIO_CNTL__MEMIO_CLR_RD_ERROR__SHIFT 0xf
+#define HDP_MEMIO_CNTL__MEMIO_VF__SHIFT 0x10
+#define HDP_MEMIO_CNTL__MEMIO_VFID__SHIFT 0x11
+#define HDP_MEMIO_CNTL__MEMIO_SEND_MASK 0x00000001L
+#define HDP_MEMIO_CNTL__MEMIO_OP_MASK 0x00000002L
+#define HDP_MEMIO_CNTL__MEMIO_BE_MASK 0x0000003CL
+#define HDP_MEMIO_CNTL__MEMIO_WR_STROBE_MASK 0x00000040L
+#define HDP_MEMIO_CNTL__MEMIO_RD_STROBE_MASK 0x00000080L
+#define HDP_MEMIO_CNTL__MEMIO_ADDR_UPPER_MASK 0x00003F00L
+#define HDP_MEMIO_CNTL__MEMIO_CLR_WR_ERROR_MASK 0x00004000L
+#define HDP_MEMIO_CNTL__MEMIO_CLR_RD_ERROR_MASK 0x00008000L
+#define HDP_MEMIO_CNTL__MEMIO_VF_MASK 0x00010000L
+#define HDP_MEMIO_CNTL__MEMIO_VFID_MASK 0x003E0000L
+//HDP_MEMIO_ADDR
+#define HDP_MEMIO_ADDR__MEMIO_ADDR_LOWER__SHIFT 0x0
+#define HDP_MEMIO_ADDR__MEMIO_ADDR_LOWER_MASK 0xFFFFFFFFL
+//HDP_MEMIO_STATUS
+#define HDP_MEMIO_STATUS__MEMIO_WR_STATUS__SHIFT 0x0
+#define HDP_MEMIO_STATUS__MEMIO_RD_STATUS__SHIFT 0x1
+#define HDP_MEMIO_STATUS__MEMIO_WR_ERROR__SHIFT 0x2
+#define HDP_MEMIO_STATUS__MEMIO_RD_ERROR__SHIFT 0x3
+#define HDP_MEMIO_STATUS__MEMIO_WR_STATUS_MASK 0x00000001L
+#define HDP_MEMIO_STATUS__MEMIO_RD_STATUS_MASK 0x00000002L
+#define HDP_MEMIO_STATUS__MEMIO_WR_ERROR_MASK 0x00000004L
+#define HDP_MEMIO_STATUS__MEMIO_RD_ERROR_MASK 0x00000008L
+//HDP_MEMIO_WR_DATA
+#define HDP_MEMIO_WR_DATA__MEMIO_WR_DATA__SHIFT 0x0
+#define HDP_MEMIO_WR_DATA__MEMIO_WR_DATA_MASK 0xFFFFFFFFL
+//HDP_MEMIO_RD_DATA
+#define HDP_MEMIO_RD_DATA__MEMIO_RD_DATA__SHIFT 0x0
+#define HDP_MEMIO_RD_DATA__MEMIO_RD_DATA_MASK 0xFFFFFFFFL
+//HDP_XDP_DIRECT2HDP_FIRST
+#define HDP_XDP_DIRECT2HDP_FIRST__RESERVED__SHIFT 0x0
+#define HDP_XDP_DIRECT2HDP_FIRST__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_FLUSH
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_FLUSH_NUM__SHIFT 0x0
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ENC_DATA__SHIFT 0x4
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ADDR_SEL__SHIFT 0x8
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_XPB_CLG__SHIFT 0xb
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_HOST__SHIFT 0x10
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_ALTER_FLUSH_NUM__SHIFT 0x12
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_0__SHIFT 0x13
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_1__SHIFT 0x14
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_FLUSH_NUM_MASK 0x0000000FL
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ENC_DATA_MASK 0x000000F0L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ADDR_SEL_MASK 0x00000700L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_XPB_CLG_MASK 0x0000F800L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_HOST_MASK 0x00010000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_ALTER_FLUSH_NUM_MASK 0x00040000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_0_MASK 0x00080000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_1_MASK 0x00100000L
+//HDP_XDP_D2H_BAR_UPDATE
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_ADDR__SHIFT 0x0
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_FLUSH_NUM__SHIFT 0x10
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_BAR_NUM__SHIFT 0x14
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_FLUSH_NUM_MASK 0x000F0000L
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_BAR_NUM_MASK 0x00700000L
+//HDP_XDP_D2H_RSVD_3
+#define HDP_XDP_D2H_RSVD_3__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_3__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_4
+#define HDP_XDP_D2H_RSVD_4__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_4__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_5
+#define HDP_XDP_D2H_RSVD_5__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_5__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_6
+#define HDP_XDP_D2H_RSVD_6__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_6__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_7
+#define HDP_XDP_D2H_RSVD_7__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_7__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_8
+#define HDP_XDP_D2H_RSVD_8__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_8__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_9
+#define HDP_XDP_D2H_RSVD_9__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_9__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_10
+#define HDP_XDP_D2H_RSVD_10__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_10__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_11
+#define HDP_XDP_D2H_RSVD_11__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_11__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_12
+#define HDP_XDP_D2H_RSVD_12__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_12__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_13
+#define HDP_XDP_D2H_RSVD_13__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_13__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_14
+#define HDP_XDP_D2H_RSVD_14__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_14__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_15
+#define HDP_XDP_D2H_RSVD_15__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_15__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_16
+#define HDP_XDP_D2H_RSVD_16__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_16__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_17
+#define HDP_XDP_D2H_RSVD_17__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_17__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_18
+#define HDP_XDP_D2H_RSVD_18__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_18__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_19
+#define HDP_XDP_D2H_RSVD_19__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_19__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_20
+#define HDP_XDP_D2H_RSVD_20__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_20__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_21
+#define HDP_XDP_D2H_RSVD_21__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_21__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_22
+#define HDP_XDP_D2H_RSVD_22__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_22__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_23
+#define HDP_XDP_D2H_RSVD_23__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_23__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_24
+#define HDP_XDP_D2H_RSVD_24__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_24__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_25
+#define HDP_XDP_D2H_RSVD_25__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_25__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_26
+#define HDP_XDP_D2H_RSVD_26__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_26__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_27
+#define HDP_XDP_D2H_RSVD_27__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_27__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_28
+#define HDP_XDP_D2H_RSVD_28__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_28__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_29
+#define HDP_XDP_D2H_RSVD_29__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_29__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_30
+#define HDP_XDP_D2H_RSVD_30__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_30__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_31
+#define HDP_XDP_D2H_RSVD_31__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_31__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_32
+#define HDP_XDP_D2H_RSVD_32__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_32__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_33
+#define HDP_XDP_D2H_RSVD_33__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_33__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_34
+#define HDP_XDP_D2H_RSVD_34__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_34__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_DIRECT2HDP_LAST
+#define HDP_XDP_DIRECT2HDP_LAST__RESERVED__SHIFT 0x0
+#define HDP_XDP_DIRECT2HDP_LAST__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_P2P_BAR_CFG
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_ADDR_SIZE__SHIFT 0x0
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_BAR_FROM__SHIFT 0x4
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_ADDR_SIZE_MASK 0x0000000FL
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_BAR_FROM_MASK 0x00000030L
+//HDP_XDP_P2P_MBX_OFFSET
+#define HDP_XDP_P2P_MBX_OFFSET__P2P_MBX_OFFSET__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_OFFSET__P2P_MBX_OFFSET_MASK 0x0001FFFFL
+//HDP_XDP_P2P_MBX_ADDR0
+#define HDP_XDP_P2P_MBX_ADDR0__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR0__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR1
+#define HDP_XDP_P2P_MBX_ADDR1__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR1__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR2
+#define HDP_XDP_P2P_MBX_ADDR2__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR2__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR3
+#define HDP_XDP_P2P_MBX_ADDR3__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR3__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR4
+#define HDP_XDP_P2P_MBX_ADDR4__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR4__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR5
+#define HDP_XDP_P2P_MBX_ADDR5__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR5__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR6
+#define HDP_XDP_P2P_MBX_ADDR6__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR6__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_HDP_MBX_MC_CFG
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_QOS__SHIFT 0x0
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SWAP__SHIFT 0x4
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_VMID__SHIFT 0x8
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_RO__SHIFT 0xc
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_GCC__SHIFT 0xd
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SNOOP__SHIFT 0xe
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_QOS_MASK 0x0000000FL
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SWAP_MASK 0x00000030L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_VMID_MASK 0x00000F00L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_RO_MASK 0x00001000L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_GCC_MASK 0x00002000L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SNOOP_MASK 0x00004000L
+//HDP_XDP_HDP_MC_CFG
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_OVERRIDE__SHIFT 0x0
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_OVERRIDE__SHIFT 0x1
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_OVERRIDE__SHIFT 0x2
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP__SHIFT 0x3
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SWAP__SHIFT 0x4
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_VMID__SHIFT 0x8
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO__SHIFT 0xc
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC__SHIFT 0xd
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XDP_HIGHER_PRI_THRESH__SHIFT 0xe
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_OVERRIDE_MASK 0x00000001L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_OVERRIDE_MASK 0x00000002L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_OVERRIDE_MASK 0x00000004L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_MASK 0x00000008L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SWAP_MASK 0x00000030L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_VMID_MASK 0x00000F00L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_MASK 0x00001000L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_MASK 0x00002000L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XDP_HIGHER_PRI_THRESH_MASK 0x000FC000L
+//HDP_XDP_HST_CFG
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_EN__SHIFT 0x0
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER__SHIFT 0x1
+#define HDP_XDP_HST_CFG__HST_CFG_WR_BURST_EN__SHIFT 0x3
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_64B_EN__SHIFT 0x4
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_PRELOAD_CFG__SHIFT 0x5
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_EN_MASK 0x00000001L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_MASK 0x00000006L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_BURST_EN_MASK 0x00000008L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_64B_EN_MASK 0x00000010L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_PRELOAD_CFG_MASK 0x00000020L
+//HDP_XDP_HDP_IPH_CFG
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_SYS_FIFO_DEPTH_OVERRIDE__SHIFT 0x0
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_XDP_FIFO_DEPTH_OVERRIDE__SHIFT 0x6
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_INVERSE_PEER_TAG_MATCHING__SHIFT 0xc
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_P2P_RD_EN__SHIFT 0xd
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_SYS_FIFO_DEPTH_OVERRIDE_MASK 0x0000003FL
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_XDP_FIFO_DEPTH_OVERRIDE_MASK 0x00000FC0L
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_INVERSE_PEER_TAG_MATCHING_MASK 0x00001000L
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_P2P_RD_EN_MASK 0x00002000L
+//HDP_XDP_P2P_BAR0
+#define HDP_XDP_P2P_BAR0__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR0__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR0__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR0__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR0__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR0__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR1
+#define HDP_XDP_P2P_BAR1__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR1__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR1__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR1__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR1__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR1__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR2
+#define HDP_XDP_P2P_BAR2__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR2__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR2__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR2__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR2__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR2__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR3
+#define HDP_XDP_P2P_BAR3__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR3__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR3__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR3__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR3__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR3__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR4
+#define HDP_XDP_P2P_BAR4__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR4__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR4__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR4__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR4__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR4__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR5
+#define HDP_XDP_P2P_BAR5__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR5__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR5__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR5__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR5__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR5__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR6
+#define HDP_XDP_P2P_BAR6__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR6__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR6__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR6__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR6__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR6__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR7
+#define HDP_XDP_P2P_BAR7__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR7__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR7__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR7__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR7__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR7__VALID_MASK 0x00100000L
+//HDP_XDP_FLUSH_ARMED_STS
+#define HDP_XDP_FLUSH_ARMED_STS__FLUSH_ARMED_STS__SHIFT 0x0
+#define HDP_XDP_FLUSH_ARMED_STS__FLUSH_ARMED_STS_MASK 0xFFFFFFFFL
+//HDP_XDP_FLUSH_CNTR0_STS
+#define HDP_XDP_FLUSH_CNTR0_STS__FLUSH_CNTR0_STS__SHIFT 0x0
+#define HDP_XDP_FLUSH_CNTR0_STS__FLUSH_CNTR0_STS_MASK 0x03FFFFFFL
+//HDP_XDP_BUSY_STS
+#define HDP_XDP_BUSY_STS__BUSY_BITS__SHIFT 0x0
+#define HDP_XDP_BUSY_STS__BUSY_BITS_MASK 0x00FFFFFFL
+//HDP_XDP_STICKY
+#define HDP_XDP_STICKY__STICKY_STS__SHIFT 0x0
+#define HDP_XDP_STICKY__STICKY_W1C__SHIFT 0x10
+#define HDP_XDP_STICKY__STICKY_STS_MASK 0x0000FFFFL
+#define HDP_XDP_STICKY__STICKY_W1C_MASK 0xFFFF0000L
+//HDP_XDP_CHKN
+#define HDP_XDP_CHKN__CHKN_0_RSVD__SHIFT 0x0
+#define HDP_XDP_CHKN__CHKN_1_RSVD__SHIFT 0x8
+#define HDP_XDP_CHKN__CHKN_2_RSVD__SHIFT 0x10
+#define HDP_XDP_CHKN__CHKN_3_RSVD__SHIFT 0x18
+#define HDP_XDP_CHKN__CHKN_0_RSVD_MASK 0x000000FFL
+#define HDP_XDP_CHKN__CHKN_1_RSVD_MASK 0x0000FF00L
+#define HDP_XDP_CHKN__CHKN_2_RSVD_MASK 0x00FF0000L
+#define HDP_XDP_CHKN__CHKN_3_RSVD_MASK 0xFF000000L
+//HDP_XDP_BARS_ADDR_39_36
+#define HDP_XDP_BARS_ADDR_39_36__BAR0_ADDR_39_36__SHIFT 0x0
+#define HDP_XDP_BARS_ADDR_39_36__BAR1_ADDR_39_36__SHIFT 0x4
+#define HDP_XDP_BARS_ADDR_39_36__BAR2_ADDR_39_36__SHIFT 0x8
+#define HDP_XDP_BARS_ADDR_39_36__BAR3_ADDR_39_36__SHIFT 0xc
+#define HDP_XDP_BARS_ADDR_39_36__BAR4_ADDR_39_36__SHIFT 0x10
+#define HDP_XDP_BARS_ADDR_39_36__BAR5_ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_BARS_ADDR_39_36__BAR6_ADDR_39_36__SHIFT 0x18
+#define HDP_XDP_BARS_ADDR_39_36__BAR7_ADDR_39_36__SHIFT 0x1c
+#define HDP_XDP_BARS_ADDR_39_36__BAR0_ADDR_39_36_MASK 0x0000000FL
+#define HDP_XDP_BARS_ADDR_39_36__BAR1_ADDR_39_36_MASK 0x000000F0L
+#define HDP_XDP_BARS_ADDR_39_36__BAR2_ADDR_39_36_MASK 0x00000F00L
+#define HDP_XDP_BARS_ADDR_39_36__BAR3_ADDR_39_36_MASK 0x0000F000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR4_ADDR_39_36_MASK 0x000F0000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR5_ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR6_ADDR_39_36_MASK 0x0F000000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR7_ADDR_39_36_MASK 0xF0000000L
+//HDP_XDP_MC_VM_FB_LOCATION_BASE
+#define HDP_XDP_MC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
+#define HDP_XDP_MC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x03FFFFFFL
+//HDP_XDP_GPU_IOV_VIOLATION_LOG
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x12
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x14
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00040000L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x00F00000L
+//HDP_XDP_GPU_IOV_VIOLATION_LOG2
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000003FFL
+//HDP_XDP_MMHUB_ERROR
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_01__SHIFT 0x1
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_10__SHIFT 0x2
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_11__SHIFT 0x3
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_FED__SHIFT 0x4
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_01__SHIFT 0x5
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_10__SHIFT 0x6
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_11__SHIFT 0x7
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_01__SHIFT 0x9
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_10__SHIFT 0xa
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_11__SHIFT 0xb
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_FED__SHIFT 0xc
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_01__SHIFT 0xd
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_10__SHIFT 0xe
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_11__SHIFT 0xf
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_01__SHIFT 0x11
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_10__SHIFT 0x12
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_11__SHIFT 0x13
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_01__SHIFT 0x15
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_10__SHIFT 0x16
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_11__SHIFT 0x17
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_01_MASK 0x00000002L
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_10_MASK 0x00000004L
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_11_MASK 0x00000008L
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_FED_MASK 0x00000010L
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_01_MASK 0x00000020L
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_10_MASK 0x00000040L
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_11_MASK 0x00000080L
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_01_MASK 0x00000200L
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_10_MASK 0x00000400L
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_11_MASK 0x00000800L
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_FED_MASK 0x00001000L
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_01_MASK 0x00002000L
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_10_MASK 0x00004000L
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_11_MASK 0x00008000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_01_MASK 0x00020000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_10_MASK 0x00040000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_11_MASK 0x00080000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_01_MASK 0x00200000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_10_MASK 0x00400000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_11_MASK 0x00800000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_offset.h
new file mode 100644
index 000000000000..f04fa95a770c
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_offset.h
@@ -0,0 +1,456 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _mp_13_0_6_OFFSET_HEADER
+#define _mp_13_0_6_OFFSET_HEADER
+
+
+
+// addressBlock: aid_mp_SmuMp0_SmnDec
+// base address: 0x0
+#define regMP0_SMN_C2PMSG_32 0x0060
+#define regMP0_SMN_C2PMSG_32_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_33 0x0061
+#define regMP0_SMN_C2PMSG_33_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_34 0x0062
+#define regMP0_SMN_C2PMSG_34_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_35 0x0063
+#define regMP0_SMN_C2PMSG_35_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_36 0x0064
+#define regMP0_SMN_C2PMSG_36_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_37 0x0065
+#define regMP0_SMN_C2PMSG_37_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_38 0x0066
+#define regMP0_SMN_C2PMSG_38_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_39 0x0067
+#define regMP0_SMN_C2PMSG_39_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_40 0x0068
+#define regMP0_SMN_C2PMSG_40_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_41 0x0069
+#define regMP0_SMN_C2PMSG_41_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_42 0x006a
+#define regMP0_SMN_C2PMSG_42_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_43 0x006b
+#define regMP0_SMN_C2PMSG_43_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_44 0x006c
+#define regMP0_SMN_C2PMSG_44_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_45 0x006d
+#define regMP0_SMN_C2PMSG_45_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_46 0x006e
+#define regMP0_SMN_C2PMSG_46_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_47 0x006f
+#define regMP0_SMN_C2PMSG_47_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_48 0x0070
+#define regMP0_SMN_C2PMSG_48_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_49 0x0071
+#define regMP0_SMN_C2PMSG_49_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_50 0x0072
+#define regMP0_SMN_C2PMSG_50_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_51 0x0073
+#define regMP0_SMN_C2PMSG_51_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_52 0x0074
+#define regMP0_SMN_C2PMSG_52_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_53 0x0075
+#define regMP0_SMN_C2PMSG_53_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_54 0x0076
+#define regMP0_SMN_C2PMSG_54_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_55 0x0077
+#define regMP0_SMN_C2PMSG_55_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_56 0x0078
+#define regMP0_SMN_C2PMSG_56_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_57 0x0079
+#define regMP0_SMN_C2PMSG_57_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_58 0x007a
+#define regMP0_SMN_C2PMSG_58_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_59 0x007b
+#define regMP0_SMN_C2PMSG_59_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_60 0x007c
+#define regMP0_SMN_C2PMSG_60_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_61 0x007d
+#define regMP0_SMN_C2PMSG_61_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_62 0x007e
+#define regMP0_SMN_C2PMSG_62_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_63 0x007f
+#define regMP0_SMN_C2PMSG_63_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_64 0x0080
+#define regMP0_SMN_C2PMSG_64_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_65 0x0081
+#define regMP0_SMN_C2PMSG_65_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_66 0x0082
+#define regMP0_SMN_C2PMSG_66_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_67 0x0083
+#define regMP0_SMN_C2PMSG_67_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_68 0x0084
+#define regMP0_SMN_C2PMSG_68_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_69 0x0085
+#define regMP0_SMN_C2PMSG_69_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_70 0x0086
+#define regMP0_SMN_C2PMSG_70_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_71 0x0087
+#define regMP0_SMN_C2PMSG_71_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_72 0x0088
+#define regMP0_SMN_C2PMSG_72_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_73 0x0089
+#define regMP0_SMN_C2PMSG_73_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_74 0x008a
+#define regMP0_SMN_C2PMSG_74_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_75 0x008b
+#define regMP0_SMN_C2PMSG_75_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_76 0x008c
+#define regMP0_SMN_C2PMSG_76_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_77 0x008d
+#define regMP0_SMN_C2PMSG_77_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_78 0x008e
+#define regMP0_SMN_C2PMSG_78_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_79 0x008f
+#define regMP0_SMN_C2PMSG_79_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_80 0x0090
+#define regMP0_SMN_C2PMSG_80_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_81 0x0091
+#define regMP0_SMN_C2PMSG_81_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_82 0x0092
+#define regMP0_SMN_C2PMSG_82_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_83 0x0093
+#define regMP0_SMN_C2PMSG_83_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_84 0x0094
+#define regMP0_SMN_C2PMSG_84_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_85 0x0095
+#define regMP0_SMN_C2PMSG_85_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_86 0x0096
+#define regMP0_SMN_C2PMSG_86_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_87 0x0097
+#define regMP0_SMN_C2PMSG_87_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_88 0x0098
+#define regMP0_SMN_C2PMSG_88_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_89 0x0099
+#define regMP0_SMN_C2PMSG_89_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_90 0x009a
+#define regMP0_SMN_C2PMSG_90_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_91 0x009b
+#define regMP0_SMN_C2PMSG_91_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_92 0x009c
+#define regMP0_SMN_C2PMSG_92_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_93 0x009d
+#define regMP0_SMN_C2PMSG_93_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_94 0x009e
+#define regMP0_SMN_C2PMSG_94_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_95 0x009f
+#define regMP0_SMN_C2PMSG_95_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_96 0x00a0
+#define regMP0_SMN_C2PMSG_96_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_97 0x00a1
+#define regMP0_SMN_C2PMSG_97_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_98 0x00a2
+#define regMP0_SMN_C2PMSG_98_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_99 0x00a3
+#define regMP0_SMN_C2PMSG_99_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_100 0x00a4
+#define regMP0_SMN_C2PMSG_100_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_101 0x00a5
+#define regMP0_SMN_C2PMSG_101_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_102 0x00a6
+#define regMP0_SMN_C2PMSG_102_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_103 0x00a7
+#define regMP0_SMN_C2PMSG_103_BASE_IDX 0
+#define regMP0_SMN_IH_CREDIT 0x00c1
+#define regMP0_SMN_IH_CREDIT_BASE_IDX 0
+#define regMP0_SMN_IH_SW_INT 0x00c2
+#define regMP0_SMN_IH_SW_INT_BASE_IDX 0
+#define regMP0_SMN_IH_SW_INT_CTRL 0x00c3
+#define regMP0_SMN_IH_SW_INT_CTRL_BASE_IDX 0
+
+
+// addressBlock: aid_mp_SmuMp1_SmnDec
+// base address: 0x0
+#define regMP1_SMN_C2PMSG_32 0x0260
+#define regMP1_SMN_C2PMSG_32_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_33 0x0261
+#define regMP1_SMN_C2PMSG_33_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_34 0x0262
+#define regMP1_SMN_C2PMSG_34_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_35 0x0263
+#define regMP1_SMN_C2PMSG_35_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_36 0x0264
+#define regMP1_SMN_C2PMSG_36_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_37 0x0265
+#define regMP1_SMN_C2PMSG_37_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_38 0x0266
+#define regMP1_SMN_C2PMSG_38_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_39 0x0267
+#define regMP1_SMN_C2PMSG_39_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_40 0x0268
+#define regMP1_SMN_C2PMSG_40_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_41 0x0269
+#define regMP1_SMN_C2PMSG_41_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_42 0x026a
+#define regMP1_SMN_C2PMSG_42_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_43 0x026b
+#define regMP1_SMN_C2PMSG_43_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_44 0x026c
+#define regMP1_SMN_C2PMSG_44_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_45 0x026d
+#define regMP1_SMN_C2PMSG_45_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_46 0x026e
+#define regMP1_SMN_C2PMSG_46_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_47 0x026f
+#define regMP1_SMN_C2PMSG_47_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_48 0x0270
+#define regMP1_SMN_C2PMSG_48_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_49 0x0271
+#define regMP1_SMN_C2PMSG_49_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_50 0x0272
+#define regMP1_SMN_C2PMSG_50_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_51 0x0273
+#define regMP1_SMN_C2PMSG_51_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_52 0x0274
+#define regMP1_SMN_C2PMSG_52_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_53 0x0275
+#define regMP1_SMN_C2PMSG_53_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_54 0x0276
+#define regMP1_SMN_C2PMSG_54_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_55 0x0277
+#define regMP1_SMN_C2PMSG_55_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_56 0x0278
+#define regMP1_SMN_C2PMSG_56_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_57 0x0279
+#define regMP1_SMN_C2PMSG_57_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_58 0x027a
+#define regMP1_SMN_C2PMSG_58_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_59 0x027b
+#define regMP1_SMN_C2PMSG_59_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_60 0x027c
+#define regMP1_SMN_C2PMSG_60_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_61 0x027d
+#define regMP1_SMN_C2PMSG_61_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_62 0x027e
+#define regMP1_SMN_C2PMSG_62_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_63 0x027f
+#define regMP1_SMN_C2PMSG_63_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_64 0x0280
+#define regMP1_SMN_C2PMSG_64_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_65 0x0281
+#define regMP1_SMN_C2PMSG_65_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_66 0x0282
+#define regMP1_SMN_C2PMSG_66_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_67 0x0283
+#define regMP1_SMN_C2PMSG_67_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_68 0x0284
+#define regMP1_SMN_C2PMSG_68_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_69 0x0285
+#define regMP1_SMN_C2PMSG_69_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_70 0x0286
+#define regMP1_SMN_C2PMSG_70_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_71 0x0287
+#define regMP1_SMN_C2PMSG_71_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_72 0x0288
+#define regMP1_SMN_C2PMSG_72_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_73 0x0289
+#define regMP1_SMN_C2PMSG_73_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_74 0x028a
+#define regMP1_SMN_C2PMSG_74_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_75 0x028b
+#define regMP1_SMN_C2PMSG_75_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_76 0x028c
+#define regMP1_SMN_C2PMSG_76_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_77 0x028d
+#define regMP1_SMN_C2PMSG_77_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_78 0x028e
+#define regMP1_SMN_C2PMSG_78_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_79 0x028f
+#define regMP1_SMN_C2PMSG_79_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_80 0x0290
+#define regMP1_SMN_C2PMSG_80_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_81 0x0291
+#define regMP1_SMN_C2PMSG_81_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_82 0x0292
+#define regMP1_SMN_C2PMSG_82_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_83 0x0293
+#define regMP1_SMN_C2PMSG_83_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_84 0x0294
+#define regMP1_SMN_C2PMSG_84_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_85 0x0295
+#define regMP1_SMN_C2PMSG_85_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_86 0x0296
+#define regMP1_SMN_C2PMSG_86_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_87 0x0297
+#define regMP1_SMN_C2PMSG_87_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_88 0x0298
+#define regMP1_SMN_C2PMSG_88_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_89 0x0299
+#define regMP1_SMN_C2PMSG_89_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_90 0x029a
+#define regMP1_SMN_C2PMSG_90_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_91 0x029b
+#define regMP1_SMN_C2PMSG_91_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_92 0x029c
+#define regMP1_SMN_C2PMSG_92_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_93 0x029d
+#define regMP1_SMN_C2PMSG_93_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_94 0x029e
+#define regMP1_SMN_C2PMSG_94_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_95 0x029f
+#define regMP1_SMN_C2PMSG_95_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_96 0x02a0
+#define regMP1_SMN_C2PMSG_96_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_97 0x02a1
+#define regMP1_SMN_C2PMSG_97_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_98 0x02a2
+#define regMP1_SMN_C2PMSG_98_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_99 0x02a3
+#define regMP1_SMN_C2PMSG_99_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_100 0x02a4
+#define regMP1_SMN_C2PMSG_100_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_101 0x02a5
+#define regMP1_SMN_C2PMSG_101_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_102 0x02a6
+#define regMP1_SMN_C2PMSG_102_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_103 0x02a7
+#define regMP1_SMN_C2PMSG_103_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_104 0x02a8
+#define regMP1_SMN_C2PMSG_104_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_105 0x02a9
+#define regMP1_SMN_C2PMSG_105_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_106 0x02aa
+#define regMP1_SMN_C2PMSG_106_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_107 0x02ab
+#define regMP1_SMN_C2PMSG_107_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_108 0x02ac
+#define regMP1_SMN_C2PMSG_108_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_109 0x02ad
+#define regMP1_SMN_C2PMSG_109_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_110 0x02ae
+#define regMP1_SMN_C2PMSG_110_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_111 0x02af
+#define regMP1_SMN_C2PMSG_111_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_112 0x02b0
+#define regMP1_SMN_C2PMSG_112_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_113 0x02b1
+#define regMP1_SMN_C2PMSG_113_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_114 0x02b2
+#define regMP1_SMN_C2PMSG_114_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_115 0x02b3
+#define regMP1_SMN_C2PMSG_115_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_116 0x02b4
+#define regMP1_SMN_C2PMSG_116_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_117 0x02b5
+#define regMP1_SMN_C2PMSG_117_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_118 0x02b6
+#define regMP1_SMN_C2PMSG_118_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_119 0x02b7
+#define regMP1_SMN_C2PMSG_119_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_120 0x02b8
+#define regMP1_SMN_C2PMSG_120_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_121 0x02b9
+#define regMP1_SMN_C2PMSG_121_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_122 0x02ba
+#define regMP1_SMN_C2PMSG_122_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_123 0x02bb
+#define regMP1_SMN_C2PMSG_123_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_124 0x02bc
+#define regMP1_SMN_C2PMSG_124_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_125 0x02bd
+#define regMP1_SMN_C2PMSG_125_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_126 0x02be
+#define regMP1_SMN_C2PMSG_126_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_127 0x02bf
+#define regMP1_SMN_C2PMSG_127_BASE_IDX 0
+#define regMP1_SMN_IH_CREDIT 0x02c1
+#define regMP1_SMN_IH_CREDIT_BASE_IDX 0
+#define regMP1_SMN_IH_SW_INT 0x02c2
+#define regMP1_SMN_IH_SW_INT_BASE_IDX 0
+#define regMP1_SMN_IH_SW_INT_CTRL 0x02c3
+#define regMP1_SMN_IH_SW_INT_CTRL_BASE_IDX 0
+#define regMP1_SMN_FPS_CNT 0x02c4
+#define regMP1_SMN_FPS_CNT_BASE_IDX 0
+#define regMP1_SMN_PUB_CTRL 0x02c5
+#define regMP1_SMN_PUB_CTRL_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH0 0x0340
+#define regMP1_SMN_EXT_SCRATCH0_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH1 0x0341
+#define regMP1_SMN_EXT_SCRATCH1_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH2 0x0342
+#define regMP1_SMN_EXT_SCRATCH2_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH3 0x0343
+#define regMP1_SMN_EXT_SCRATCH3_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH4 0x0344
+#define regMP1_SMN_EXT_SCRATCH4_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH5 0x0345
+#define regMP1_SMN_EXT_SCRATCH5_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH6 0x0346
+#define regMP1_SMN_EXT_SCRATCH6_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH7 0x0347
+#define regMP1_SMN_EXT_SCRATCH7_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH8 0x0348
+#define regMP1_SMN_EXT_SCRATCH8_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH10 0x034a
+#define regMP1_SMN_EXT_SCRATCH10_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH11 0x034b
+#define regMP1_SMN_EXT_SCRATCH11_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH12 0x034c
+#define regMP1_SMN_EXT_SCRATCH12_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH13 0x034d
+#define regMP1_SMN_EXT_SCRATCH13_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH14 0x034e
+#define regMP1_SMN_EXT_SCRATCH14_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH15 0x034f
+#define regMP1_SMN_EXT_SCRATCH15_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH16 0x0350
+#define regMP1_SMN_EXT_SCRATCH16_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH17 0x0351
+#define regMP1_SMN_EXT_SCRATCH17_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH18 0x0352
+#define regMP1_SMN_EXT_SCRATCH18_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH19 0x0353
+#define regMP1_SMN_EXT_SCRATCH19_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH20 0x0354
+#define regMP1_SMN_EXT_SCRATCH20_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH21 0x0355
+#define regMP1_SMN_EXT_SCRATCH21_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH22 0x0356
+#define regMP1_SMN_EXT_SCRATCH22_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH23 0x0357
+#define regMP1_SMN_EXT_SCRATCH23_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH24 0x0358
+#define regMP1_SMN_EXT_SCRATCH24_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH25 0x0359
+#define regMP1_SMN_EXT_SCRATCH25_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH26 0x035a
+#define regMP1_SMN_EXT_SCRATCH26_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH27 0x035b
+#define regMP1_SMN_EXT_SCRATCH27_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH28 0x035c
+#define regMP1_SMN_EXT_SCRATCH28_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH29 0x035d
+#define regMP1_SMN_EXT_SCRATCH29_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH30 0x035e
+#define regMP1_SMN_EXT_SCRATCH30_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH31 0x035f
+#define regMP1_SMN_EXT_SCRATCH31_BASE_IDX 0
+
+
+// addressBlock: aid_mp_SmuMp1Pub_CruDec
+// base address: 0x0
+#define regMP1_FIRMWARE_FLAGS 0xbee00a
+#define regMP1_FIRMWARE_FLAGS_BASE_IDX 0
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_sh_mask.h
new file mode 100644
index 000000000000..780d9824d5ed
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_sh_mask.h
@@ -0,0 +1,674 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _mp_13_0_6_SH_MASK_HEADER
+#define _mp_13_0_6_SH_MASK_HEADER
+
+
+// addressBlock: aid_mp_SmuMp0_SmnDec
+//MP0_SMN_C2PMSG_32
+#define MP0_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_33
+#define MP0_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_34
+#define MP0_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_35
+#define MP0_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_36
+#define MP0_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_37
+#define MP0_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_38
+#define MP0_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_39
+#define MP0_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_40
+#define MP0_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_41
+#define MP0_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_42
+#define MP0_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_43
+#define MP0_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_44
+#define MP0_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_45
+#define MP0_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_46
+#define MP0_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_47
+#define MP0_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_48
+#define MP0_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_49
+#define MP0_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_50
+#define MP0_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_51
+#define MP0_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_52
+#define MP0_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_53
+#define MP0_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_54
+#define MP0_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_55
+#define MP0_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_56
+#define MP0_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_57
+#define MP0_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_58
+#define MP0_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_59
+#define MP0_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_60
+#define MP0_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_61
+#define MP0_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_62
+#define MP0_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_63
+#define MP0_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_64
+#define MP0_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_65
+#define MP0_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_66
+#define MP0_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_67
+#define MP0_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_68
+#define MP0_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_69
+#define MP0_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_70
+#define MP0_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_71
+#define MP0_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_72
+#define MP0_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_73
+#define MP0_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_74
+#define MP0_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_75
+#define MP0_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_76
+#define MP0_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_77
+#define MP0_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_78
+#define MP0_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_79
+#define MP0_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_80
+#define MP0_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_81
+#define MP0_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_82
+#define MP0_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_83
+#define MP0_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_84
+#define MP0_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_85
+#define MP0_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_86
+#define MP0_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_87
+#define MP0_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_88
+#define MP0_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_89
+#define MP0_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_90
+#define MP0_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_91
+#define MP0_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_92
+#define MP0_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_93
+#define MP0_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_94
+#define MP0_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_95
+#define MP0_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_96
+#define MP0_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_97
+#define MP0_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_98
+#define MP0_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_99
+#define MP0_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_100
+#define MP0_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_101
+#define MP0_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_102
+#define MP0_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_103
+#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_IH_CREDIT
+#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MP0_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MP0_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+//MP0_SMN_IH_SW_INT
+#define MP0_SMN_IH_SW_INT__ID__SHIFT 0x0
+#define MP0_SMN_IH_SW_INT__VALID__SHIFT 0x8
+#define MP0_SMN_IH_SW_INT__ID_MASK 0x000000FFL
+#define MP0_SMN_IH_SW_INT__VALID_MASK 0x00000100L
+//MP0_SMN_IH_SW_INT_CTRL
+#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
+#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
+#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
+#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
+
+
+// addressBlock: aid_mp_SmuMp1_SmnDec
+//MP1_SMN_C2PMSG_32
+#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_33
+#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_34
+#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_35
+#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_36
+#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_37
+#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_38
+#define MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_39
+#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_40
+#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_41
+#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_42
+#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_43
+#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_44
+#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_45
+#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_46
+#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_47
+#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_48
+#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_49
+#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_50
+#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_51
+#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_52
+#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_53
+#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_54
+#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_55
+#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_56
+#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_57
+#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_58
+#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_59
+#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_60
+#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_61
+#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_62
+#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_63
+#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_64
+#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_65
+#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_66
+#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_67
+#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_68
+#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_69
+#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_70
+#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_71
+#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_72
+#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_73
+#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_74
+#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_75
+#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_76
+#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_77
+#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_78
+#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_79
+#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_80
+#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_81
+#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_82
+#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_83
+#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_84
+#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_85
+#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_86
+#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_87
+#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_88
+#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_89
+#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_90
+#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_91
+#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_92
+#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_93
+#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_94
+#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_95
+#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_96
+#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_97
+#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_98
+#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_99
+#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_100
+#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_101
+#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_102
+#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_103
+#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_104
+#define MP1_SMN_C2PMSG_104__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_104__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_105
+#define MP1_SMN_C2PMSG_105__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_105__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_106
+#define MP1_SMN_C2PMSG_106__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_106__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_107
+#define MP1_SMN_C2PMSG_107__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_107__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_108
+#define MP1_SMN_C2PMSG_108__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_108__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_109
+#define MP1_SMN_C2PMSG_109__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_109__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_110
+#define MP1_SMN_C2PMSG_110__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_110__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_111
+#define MP1_SMN_C2PMSG_111__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_111__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_112
+#define MP1_SMN_C2PMSG_112__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_112__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_113
+#define MP1_SMN_C2PMSG_113__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_113__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_114
+#define MP1_SMN_C2PMSG_114__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_114__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_115
+#define MP1_SMN_C2PMSG_115__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_115__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_116
+#define MP1_SMN_C2PMSG_116__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_116__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_117
+#define MP1_SMN_C2PMSG_117__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_117__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_118
+#define MP1_SMN_C2PMSG_118__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_118__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_119
+#define MP1_SMN_C2PMSG_119__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_119__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_120
+#define MP1_SMN_C2PMSG_120__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_120__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_121
+#define MP1_SMN_C2PMSG_121__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_121__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_122
+#define MP1_SMN_C2PMSG_122__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_122__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_123
+#define MP1_SMN_C2PMSG_123__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_123__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_124
+#define MP1_SMN_C2PMSG_124__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_124__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_125
+#define MP1_SMN_C2PMSG_125__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_125__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_126
+#define MP1_SMN_C2PMSG_126__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_126__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_127
+#define MP1_SMN_C2PMSG_127__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_127__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_IH_CREDIT
+#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+//MP1_SMN_IH_SW_INT
+#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x0
+#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x8
+#define MP1_SMN_IH_SW_INT__ID_MASK 0x000000FFL
+#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000100L
+//MP1_SMN_IH_SW_INT_CTRL
+#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
+#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
+#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
+#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
+//MP1_SMN_FPS_CNT
+#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0
+#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL
+//MP1_SMN_PUB_CTRL
+#define MP1_SMN_PUB_CTRL__LX3_RESET__SHIFT 0x0
+#define MP1_SMN_PUB_CTRL__LX3_RESET_MASK 0x00000001L
+//MP1_SMN_EXT_SCRATCH0
+#define MP1_SMN_EXT_SCRATCH0__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH1
+#define MP1_SMN_EXT_SCRATCH1__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH2
+#define MP1_SMN_EXT_SCRATCH2__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH3
+#define MP1_SMN_EXT_SCRATCH3__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH4
+#define MP1_SMN_EXT_SCRATCH4__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH5
+#define MP1_SMN_EXT_SCRATCH5__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH6
+#define MP1_SMN_EXT_SCRATCH6__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH7
+#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH8
+#define MP1_SMN_EXT_SCRATCH8__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH8__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH10
+#define MP1_SMN_EXT_SCRATCH10__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH10__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH11
+#define MP1_SMN_EXT_SCRATCH11__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH11__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH12
+#define MP1_SMN_EXT_SCRATCH12__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH12__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH13
+#define MP1_SMN_EXT_SCRATCH13__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH13__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH14
+#define MP1_SMN_EXT_SCRATCH14__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH14__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH15
+#define MP1_SMN_EXT_SCRATCH15__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH15__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH16
+#define MP1_SMN_EXT_SCRATCH16__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH16__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH17
+#define MP1_SMN_EXT_SCRATCH17__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH17__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH18
+#define MP1_SMN_EXT_SCRATCH18__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH18__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH19
+#define MP1_SMN_EXT_SCRATCH19__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH19__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH20
+#define MP1_SMN_EXT_SCRATCH20__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH20__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH21
+#define MP1_SMN_EXT_SCRATCH21__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH21__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH22
+#define MP1_SMN_EXT_SCRATCH22__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH22__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH23
+#define MP1_SMN_EXT_SCRATCH23__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH23__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH24
+#define MP1_SMN_EXT_SCRATCH24__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH24__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH25
+#define MP1_SMN_EXT_SCRATCH25__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH25__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH26
+#define MP1_SMN_EXT_SCRATCH26__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH26__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH27
+#define MP1_SMN_EXT_SCRATCH27__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH27__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH28
+#define MP1_SMN_EXT_SCRATCH28__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH28__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH29
+#define MP1_SMN_EXT_SCRATCH29__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH29__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH30
+#define MP1_SMN_EXT_SCRATCH30__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH30__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH31
+#define MP1_SMN_EXT_SCRATCH31__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH31__DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_mp_SmuMp1Pub_CruDec
+//MP1_FIRMWARE_FLAGS
+#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
+#define MP1_FIRMWARE_FLAGS__RESERVED__SHIFT 0x1
+#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x00000001L
+#define MP1_FIRMWARE_FLAGS__RESERVED_MASK 0xFFFFFFFEL
+
+
+#endif
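
For reference, the generated *__SHIFT / *_MASK pairs in the header above are meant to be combined in the usual way: read the register, AND with the field mask, then shift the field down (and the reverse to compose a write value). Below is a minimal, self-contained sketch of that pattern, reusing the MP1_SMN_IH_CREDIT field definitions from this header; the helper names are illustrative only and are not part of the patch, and in-tree amdgpu code normally goes through wrappers such as REG_GET_FIELD()/REG_SET_FIELD() rather than open-coding the shifts.

/*
 * Illustrative sketch only (not part of the patch above).
 * Field definitions copied from the mp_13_0_6_sh_mask.h hunk.
 */
#include <stdint.h>

#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT    0x10
#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK   0x00000003L
#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK      0x00FF0000L

/* Extract the CLIENT_ID field from a raw register value. */
static inline uint32_t ih_credit_client_id(uint32_t reg_val)
{
	return (reg_val & MP1_SMN_IH_CREDIT__CLIENT_ID_MASK) >>
	       MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT;
}

/* Compose a register value from individual field values. */
static inline uint32_t ih_credit_pack(uint32_t credit, uint32_t client_id)
{
	return ((credit << MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT) &
		MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK) |
	       ((client_id << MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT) &
		MP1_SMN_IH_CREDIT__CLIENT_ID_MASK);
}

The same convention applies to every register in this family: each field contributes one __SHIFT and one _MASK macro, so a 32-bit CONTENT field simply uses shift 0x0 and mask 0xFFFFFFFFL.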
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_offset.h
new file mode 100644
index 000000000000..31bef0776ded
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_offset.h
@@ -0,0 +1,1109 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _sdma_4_4_2_OFFSET_HEADER
+#define _sdma_4_4_2_OFFSET_HEADER
+
+
+
+// addressBlock: aid_sdma_insts_sdma0_sdmadec
+// base address: 0x4980
+#define regSDMA_UCODE_ADDR 0x0000
+#define regSDMA_UCODE_ADDR_BASE_IDX 0
+#define regSDMA_UCODE_DATA 0x0001
+#define regSDMA_UCODE_DATA_BASE_IDX 0
+#define regSDMA_F32_CNTL 0x0002
+#define regSDMA_F32_CNTL_BASE_IDX 0
+#define regSDMA_MMHUB_CNTL 0x0005
+#define regSDMA_MMHUB_CNTL_BASE_IDX 0
+#define regSDMA_MMHUB_TRUSTLVL 0x0006
+#define regSDMA_MMHUB_TRUSTLVL_BASE_IDX 0
+#define regSDMA_VM_CNTL 0x0010
+#define regSDMA_VM_CNTL_BASE_IDX 0
+#define regSDMA_VM_CTX_LO 0x0011
+#define regSDMA_VM_CTX_LO_BASE_IDX 0
+#define regSDMA_VM_CTX_HI 0x0012
+#define regSDMA_VM_CTX_HI_BASE_IDX 0
+#define regSDMA_ACTIVE_FCN_ID 0x0013
+#define regSDMA_ACTIVE_FCN_ID_BASE_IDX 0
+#define regSDMA_VM_CTX_CNTL 0x0014
+#define regSDMA_VM_CTX_CNTL_BASE_IDX 0
+#define regSDMA_VIRT_RESET_REQ 0x0015
+#define regSDMA_VIRT_RESET_REQ_BASE_IDX 0
+#define regSDMA_VF_ENABLE 0x0016
+#define regSDMA_VF_ENABLE_BASE_IDX 0
+#define regSDMA_CONTEXT_REG_TYPE0 0x0017
+#define regSDMA_CONTEXT_REG_TYPE0_BASE_IDX 0
+#define regSDMA_CONTEXT_REG_TYPE1 0x0018
+#define regSDMA_CONTEXT_REG_TYPE1_BASE_IDX 0
+#define regSDMA_CONTEXT_REG_TYPE2 0x0019
+#define regSDMA_CONTEXT_REG_TYPE2_BASE_IDX 0
+#define regSDMA_CONTEXT_REG_TYPE3 0x001a
+#define regSDMA_CONTEXT_REG_TYPE3_BASE_IDX 0
+#define regSDMA_PUB_REG_TYPE0 0x001b
+#define regSDMA_PUB_REG_TYPE0_BASE_IDX 0
+#define regSDMA_PUB_REG_TYPE1 0x001c
+#define regSDMA_PUB_REG_TYPE1_BASE_IDX 0
+#define regSDMA_PUB_REG_TYPE2 0x001d
+#define regSDMA_PUB_REG_TYPE2_BASE_IDX 0
+#define regSDMA_PUB_REG_TYPE3 0x001e
+#define regSDMA_PUB_REG_TYPE3_BASE_IDX 0
+#define regSDMA_CONTEXT_GROUP_BOUNDARY 0x001f
+#define regSDMA_CONTEXT_GROUP_BOUNDARY_BASE_IDX 0
+#define regSDMA_RB_RPTR_FETCH_HI 0x0020
+#define regSDMA_RB_RPTR_FETCH_HI_BASE_IDX 0
+#define regSDMA_SEM_WAIT_FAIL_TIMER_CNTL 0x0021
+#define regSDMA_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0
+#define regSDMA_RB_RPTR_FETCH 0x0022
+#define regSDMA_RB_RPTR_FETCH_BASE_IDX 0
+#define regSDMA_IB_OFFSET_FETCH 0x0023
+#define regSDMA_IB_OFFSET_FETCH_BASE_IDX 0
+#define regSDMA_PROGRAM 0x0024
+#define regSDMA_PROGRAM_BASE_IDX 0
+#define regSDMA_STATUS_REG 0x0025
+#define regSDMA_STATUS_REG_BASE_IDX 0
+#define regSDMA_STATUS1_REG 0x0026
+#define regSDMA_STATUS1_REG_BASE_IDX 0
+#define regSDMA_RD_BURST_CNTL 0x0027
+#define regSDMA_RD_BURST_CNTL_BASE_IDX 0
+#define regSDMA_HBM_PAGE_CONFIG 0x0028
+#define regSDMA_HBM_PAGE_CONFIG_BASE_IDX 0
+#define regSDMA_UCODE_CHECKSUM 0x0029
+#define regSDMA_UCODE_CHECKSUM_BASE_IDX 0
+#define regSDMA_FREEZE 0x002b
+#define regSDMA_FREEZE_BASE_IDX 0
+#define regSDMA_PHASE0_QUANTUM 0x002c
+#define regSDMA_PHASE0_QUANTUM_BASE_IDX 0
+#define regSDMA_PHASE1_QUANTUM 0x002d
+#define regSDMA_PHASE1_QUANTUM_BASE_IDX 0
+#define regSDMA_POWER_GATING 0x002e
+#define regSDMA_POWER_GATING_BASE_IDX 0
+#define regSDMA_PGFSM_CONFIG 0x002f
+#define regSDMA_PGFSM_CONFIG_BASE_IDX 0
+#define regSDMA_PGFSM_WRITE 0x0030
+#define regSDMA_PGFSM_WRITE_BASE_IDX 0
+#define regSDMA_PGFSM_READ 0x0031
+#define regSDMA_PGFSM_READ_BASE_IDX 0
+#define regCC_SDMA_EDC_CONFIG 0x0032
+#define regCC_SDMA_EDC_CONFIG_BASE_IDX 0
+#define regSDMA_BA_THRESHOLD 0x0033
+#define regSDMA_BA_THRESHOLD_BASE_IDX 0
+#define regSDMA_ID 0x0034
+#define regSDMA_ID_BASE_IDX 0
+#define regSDMA_VERSION 0x0035
+#define regSDMA_VERSION_BASE_IDX 0
+#define regSDMA_EDC_COUNTER 0x0036
+#define regSDMA_EDC_COUNTER_BASE_IDX 0
+#define regSDMA_EDC_COUNTER2 0x0037
+#define regSDMA_EDC_COUNTER2_BASE_IDX 0
+#define regSDMA_STATUS2_REG 0x0038
+#define regSDMA_STATUS2_REG_BASE_IDX 0
+#define regSDMA_ATOMIC_CNTL 0x0039
+#define regSDMA_ATOMIC_CNTL_BASE_IDX 0
+#define regSDMA_ATOMIC_PREOP_LO 0x003a
+#define regSDMA_ATOMIC_PREOP_LO_BASE_IDX 0
+#define regSDMA_ATOMIC_PREOP_HI 0x003b
+#define regSDMA_ATOMIC_PREOP_HI_BASE_IDX 0
+#define regSDMA_UTCL1_CNTL 0x003c
+#define regSDMA_UTCL1_CNTL_BASE_IDX 0
+#define regSDMA_UTCL1_WATERMK 0x003d
+#define regSDMA_UTCL1_WATERMK_BASE_IDX 0
+#define regSDMA_UTCL1_RD_STATUS 0x003e
+#define regSDMA_UTCL1_RD_STATUS_BASE_IDX 0
+#define regSDMA_UTCL1_WR_STATUS 0x003f
+#define regSDMA_UTCL1_WR_STATUS_BASE_IDX 0
+#define regSDMA_UTCL1_INV0 0x0040
+#define regSDMA_UTCL1_INV0_BASE_IDX 0
+#define regSDMA_UTCL1_INV1 0x0041
+#define regSDMA_UTCL1_INV1_BASE_IDX 0
+#define regSDMA_UTCL1_INV2 0x0042
+#define regSDMA_UTCL1_INV2_BASE_IDX 0
+#define regSDMA_UTCL1_RD_XNACK0 0x0043
+#define regSDMA_UTCL1_RD_XNACK0_BASE_IDX 0
+#define regSDMA_UTCL1_RD_XNACK1 0x0044
+#define regSDMA_UTCL1_RD_XNACK1_BASE_IDX 0
+#define regSDMA_UTCL1_WR_XNACK0 0x0045
+#define regSDMA_UTCL1_WR_XNACK0_BASE_IDX 0
+#define regSDMA_UTCL1_WR_XNACK1 0x0046
+#define regSDMA_UTCL1_WR_XNACK1_BASE_IDX 0
+#define regSDMA_UTCL1_TIMEOUT 0x0047
+#define regSDMA_UTCL1_TIMEOUT_BASE_IDX 0
+#define regSDMA_UTCL1_PAGE 0x0048
+#define regSDMA_UTCL1_PAGE_BASE_IDX 0
+#define regSDMA_POWER_CNTL_IDLE 0x0049
+#define regSDMA_POWER_CNTL_IDLE_BASE_IDX 0
+#define regSDMA_RELAX_ORDERING_LUT 0x004a
+#define regSDMA_RELAX_ORDERING_LUT_BASE_IDX 0
+#define regSDMA_CHICKEN_BITS_2 0x004b
+#define regSDMA_CHICKEN_BITS_2_BASE_IDX 0
+#define regSDMA_STATUS3_REG 0x004c
+#define regSDMA_STATUS3_REG_BASE_IDX 0
+#define regSDMA_PHYSICAL_ADDR_LO 0x004d
+#define regSDMA_PHYSICAL_ADDR_LO_BASE_IDX 0
+#define regSDMA_PHYSICAL_ADDR_HI 0x004e
+#define regSDMA_PHYSICAL_ADDR_HI_BASE_IDX 0
+#define regSDMA_PHASE2_QUANTUM 0x004f
+#define regSDMA_PHASE2_QUANTUM_BASE_IDX 0
+#define regSDMA_ERROR_LOG 0x0050
+#define regSDMA_ERROR_LOG_BASE_IDX 0
+#define regSDMA_PUB_DUMMY_REG0 0x0051
+#define regSDMA_PUB_DUMMY_REG0_BASE_IDX 0
+#define regSDMA_PUB_DUMMY_REG1 0x0052
+#define regSDMA_PUB_DUMMY_REG1_BASE_IDX 0
+#define regSDMA_PUB_DUMMY_REG2 0x0053
+#define regSDMA_PUB_DUMMY_REG2_BASE_IDX 0
+#define regSDMA_PUB_DUMMY_REG3 0x0054
+#define regSDMA_PUB_DUMMY_REG3_BASE_IDX 0
+#define regSDMA_F32_COUNTER 0x0055
+#define regSDMA_F32_COUNTER_BASE_IDX 0
+#define regSDMA_PERFCNT_PERFCOUNTER0_CFG 0x0057
+#define regSDMA_PERFCNT_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regSDMA_PERFCNT_PERFCOUNTER1_CFG 0x0058
+#define regSDMA_PERFCNT_PERFCOUNTER1_CFG_BASE_IDX 0
+#define regSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL 0x0059
+#define regSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regSDMA_PERFCNT_MISC_CNTL 0x005a
+#define regSDMA_PERFCNT_MISC_CNTL_BASE_IDX 0
+#define regSDMA_PERFCNT_PERFCOUNTER_LO 0x005b
+#define regSDMA_PERFCNT_PERFCOUNTER_LO_BASE_IDX 0
+#define regSDMA_PERFCNT_PERFCOUNTER_HI 0x005c
+#define regSDMA_PERFCNT_PERFCOUNTER_HI_BASE_IDX 0
+#define regSDMA_CRD_CNTL 0x005d
+#define regSDMA_CRD_CNTL_BASE_IDX 0
+#define regSDMA_GPU_IOV_VIOLATION_LOG 0x005e
+#define regSDMA_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define regSDMA_ULV_CNTL 0x005f
+#define regSDMA_ULV_CNTL_BASE_IDX 0
+#define regSDMA_EA_DBIT_ADDR_DATA 0x0060
+#define regSDMA_EA_DBIT_ADDR_DATA_BASE_IDX 0
+#define regSDMA_EA_DBIT_ADDR_INDEX 0x0061
+#define regSDMA_EA_DBIT_ADDR_INDEX_BASE_IDX 0
+#define regSDMA_GPU_IOV_VIOLATION_LOG2 0x0062
+#define regSDMA_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0
+#define regSDMA_STATUS4_REG 0x0063
+#define regSDMA_STATUS4_REG_BASE_IDX 0
+#define regSDMA_SCRATCH_RAM_DATA 0x0064
+#define regSDMA_SCRATCH_RAM_DATA_BASE_IDX 0
+#define regSDMA_SCRATCH_RAM_ADDR 0x0065
+#define regSDMA_SCRATCH_RAM_ADDR_BASE_IDX 0
+#define regSDMA_CE_CTRL 0x0066
+#define regSDMA_CE_CTRL_BASE_IDX 0
+#define regSDMA_RAS_STATUS 0x0067
+#define regSDMA_RAS_STATUS_BASE_IDX 0
+#define regSDMA_CLK_STATUS 0x0068
+#define regSDMA_CLK_STATUS_BASE_IDX 0
+#define regSDMA_POWER_CNTL 0x006b
+#define regSDMA_POWER_CNTL_BASE_IDX 0
+#define regSDMA_CLK_CTRL 0x006c
+#define regSDMA_CLK_CTRL_BASE_IDX 0
+#define regSDMA_CNTL 0x006d
+#define regSDMA_CNTL_BASE_IDX 0
+#define regSDMA_CHICKEN_BITS 0x006e
+#define regSDMA_CHICKEN_BITS_BASE_IDX 0
+#define regSDMA_GB_ADDR_CONFIG 0x006f
+#define regSDMA_GB_ADDR_CONFIG_BASE_IDX 0
+#define regSDMA_GB_ADDR_CONFIG_READ 0x0070
+#define regSDMA_GB_ADDR_CONFIG_READ_BASE_IDX 0
+#define regSDMA_GFX_RB_CNTL 0x0080
+#define regSDMA_GFX_RB_CNTL_BASE_IDX 0
+#define regSDMA_GFX_RB_BASE 0x0081
+#define regSDMA_GFX_RB_BASE_BASE_IDX 0
+#define regSDMA_GFX_RB_BASE_HI 0x0082
+#define regSDMA_GFX_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_GFX_RB_RPTR 0x0083
+#define regSDMA_GFX_RB_RPTR_BASE_IDX 0
+#define regSDMA_GFX_RB_RPTR_HI 0x0084
+#define regSDMA_GFX_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_GFX_RB_WPTR 0x0085
+#define regSDMA_GFX_RB_WPTR_BASE_IDX 0
+#define regSDMA_GFX_RB_WPTR_HI 0x0086
+#define regSDMA_GFX_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_GFX_RB_WPTR_POLL_CNTL 0x0087
+#define regSDMA_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_GFX_RB_RPTR_ADDR_HI 0x0088
+#define regSDMA_GFX_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_GFX_RB_RPTR_ADDR_LO 0x0089
+#define regSDMA_GFX_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_GFX_IB_CNTL 0x008a
+#define regSDMA_GFX_IB_CNTL_BASE_IDX 0
+#define regSDMA_GFX_IB_RPTR 0x008b
+#define regSDMA_GFX_IB_RPTR_BASE_IDX 0
+#define regSDMA_GFX_IB_OFFSET 0x008c
+#define regSDMA_GFX_IB_OFFSET_BASE_IDX 0
+#define regSDMA_GFX_IB_BASE_LO 0x008d
+#define regSDMA_GFX_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_GFX_IB_BASE_HI 0x008e
+#define regSDMA_GFX_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_GFX_IB_SIZE 0x008f
+#define regSDMA_GFX_IB_SIZE_BASE_IDX 0
+#define regSDMA_GFX_SKIP_CNTL 0x0090
+#define regSDMA_GFX_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_GFX_CONTEXT_STATUS 0x0091
+#define regSDMA_GFX_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_GFX_DOORBELL 0x0092
+#define regSDMA_GFX_DOORBELL_BASE_IDX 0
+#define regSDMA_GFX_CONTEXT_CNTL 0x0093
+#define regSDMA_GFX_CONTEXT_CNTL_BASE_IDX 0
+#define regSDMA_GFX_STATUS 0x00a8
+#define regSDMA_GFX_STATUS_BASE_IDX 0
+#define regSDMA_GFX_DOORBELL_LOG 0x00a9
+#define regSDMA_GFX_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_GFX_WATERMARK 0x00aa
+#define regSDMA_GFX_WATERMARK_BASE_IDX 0
+#define regSDMA_GFX_DOORBELL_OFFSET 0x00ab
+#define regSDMA_GFX_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_GFX_CSA_ADDR_LO 0x00ac
+#define regSDMA_GFX_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_GFX_CSA_ADDR_HI 0x00ad
+#define regSDMA_GFX_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_GFX_IB_SUB_REMAIN 0x00af
+#define regSDMA_GFX_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_GFX_PREEMPT 0x00b0
+#define regSDMA_GFX_PREEMPT_BASE_IDX 0
+#define regSDMA_GFX_DUMMY_REG 0x00b1
+#define regSDMA_GFX_DUMMY_REG_BASE_IDX 0
+#define regSDMA_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2
+#define regSDMA_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3
+#define regSDMA_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_GFX_RB_AQL_CNTL 0x00b4
+#define regSDMA_GFX_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_GFX_MINOR_PTR_UPDATE 0x00b5
+#define regSDMA_GFX_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA0 0x00c0
+#define regSDMA_GFX_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA1 0x00c1
+#define regSDMA_GFX_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA2 0x00c2
+#define regSDMA_GFX_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA3 0x00c3
+#define regSDMA_GFX_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA4 0x00c4
+#define regSDMA_GFX_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA5 0x00c5
+#define regSDMA_GFX_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA6 0x00c6
+#define regSDMA_GFX_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA7 0x00c7
+#define regSDMA_GFX_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA8 0x00c8
+#define regSDMA_GFX_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA9 0x00c9
+#define regSDMA_GFX_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA10 0x00ca
+#define regSDMA_GFX_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_CNTL 0x00cb
+#define regSDMA_GFX_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_PAGE_RB_CNTL 0x00d8
+#define regSDMA_PAGE_RB_CNTL_BASE_IDX 0
+#define regSDMA_PAGE_RB_BASE 0x00d9
+#define regSDMA_PAGE_RB_BASE_BASE_IDX 0
+#define regSDMA_PAGE_RB_BASE_HI 0x00da
+#define regSDMA_PAGE_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_PAGE_RB_RPTR 0x00db
+#define regSDMA_PAGE_RB_RPTR_BASE_IDX 0
+#define regSDMA_PAGE_RB_RPTR_HI 0x00dc
+#define regSDMA_PAGE_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_PAGE_RB_WPTR 0x00dd
+#define regSDMA_PAGE_RB_WPTR_BASE_IDX 0
+#define regSDMA_PAGE_RB_WPTR_HI 0x00de
+#define regSDMA_PAGE_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_PAGE_RB_WPTR_POLL_CNTL 0x00df
+#define regSDMA_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_PAGE_RB_RPTR_ADDR_HI 0x00e0
+#define regSDMA_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_PAGE_RB_RPTR_ADDR_LO 0x00e1
+#define regSDMA_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_PAGE_IB_CNTL 0x00e2
+#define regSDMA_PAGE_IB_CNTL_BASE_IDX 0
+#define regSDMA_PAGE_IB_RPTR 0x00e3
+#define regSDMA_PAGE_IB_RPTR_BASE_IDX 0
+#define regSDMA_PAGE_IB_OFFSET 0x00e4
+#define regSDMA_PAGE_IB_OFFSET_BASE_IDX 0
+#define regSDMA_PAGE_IB_BASE_LO 0x00e5
+#define regSDMA_PAGE_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_PAGE_IB_BASE_HI 0x00e6
+#define regSDMA_PAGE_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_PAGE_IB_SIZE 0x00e7
+#define regSDMA_PAGE_IB_SIZE_BASE_IDX 0
+#define regSDMA_PAGE_SKIP_CNTL 0x00e8
+#define regSDMA_PAGE_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_PAGE_CONTEXT_STATUS 0x00e9
+#define regSDMA_PAGE_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_PAGE_DOORBELL 0x00ea
+#define regSDMA_PAGE_DOORBELL_BASE_IDX 0
+#define regSDMA_PAGE_STATUS 0x0100
+#define regSDMA_PAGE_STATUS_BASE_IDX 0
+#define regSDMA_PAGE_DOORBELL_LOG 0x0101
+#define regSDMA_PAGE_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_PAGE_WATERMARK 0x0102
+#define regSDMA_PAGE_WATERMARK_BASE_IDX 0
+#define regSDMA_PAGE_DOORBELL_OFFSET 0x0103
+#define regSDMA_PAGE_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_PAGE_CSA_ADDR_LO 0x0104
+#define regSDMA_PAGE_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_PAGE_CSA_ADDR_HI 0x0105
+#define regSDMA_PAGE_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_PAGE_IB_SUB_REMAIN 0x0107
+#define regSDMA_PAGE_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_PAGE_PREEMPT 0x0108
+#define regSDMA_PAGE_PREEMPT_BASE_IDX 0
+#define regSDMA_PAGE_DUMMY_REG 0x0109
+#define regSDMA_PAGE_DUMMY_REG_BASE_IDX 0
+#define regSDMA_PAGE_RB_WPTR_POLL_ADDR_HI 0x010a
+#define regSDMA_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_PAGE_RB_WPTR_POLL_ADDR_LO 0x010b
+#define regSDMA_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_PAGE_RB_AQL_CNTL 0x010c
+#define regSDMA_PAGE_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_PAGE_MINOR_PTR_UPDATE 0x010d
+#define regSDMA_PAGE_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA0 0x0118
+#define regSDMA_PAGE_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA1 0x0119
+#define regSDMA_PAGE_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA2 0x011a
+#define regSDMA_PAGE_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA3 0x011b
+#define regSDMA_PAGE_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA4 0x011c
+#define regSDMA_PAGE_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA5 0x011d
+#define regSDMA_PAGE_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA6 0x011e
+#define regSDMA_PAGE_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA7 0x011f
+#define regSDMA_PAGE_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA8 0x0120
+#define regSDMA_PAGE_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA9 0x0121
+#define regSDMA_PAGE_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA10 0x0122
+#define regSDMA_PAGE_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_CNTL 0x0123
+#define regSDMA_PAGE_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC0_RB_CNTL 0x0130
+#define regSDMA_RLC0_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC0_RB_BASE 0x0131
+#define regSDMA_RLC0_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC0_RB_BASE_HI 0x0132
+#define regSDMA_RLC0_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC0_RB_RPTR 0x0133
+#define regSDMA_RLC0_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC0_RB_RPTR_HI 0x0134
+#define regSDMA_RLC0_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC0_RB_WPTR 0x0135
+#define regSDMA_RLC0_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC0_RB_WPTR_HI 0x0136
+#define regSDMA_RLC0_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC0_RB_WPTR_POLL_CNTL 0x0137
+#define regSDMA_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC0_RB_RPTR_ADDR_HI 0x0138
+#define regSDMA_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC0_RB_RPTR_ADDR_LO 0x0139
+#define regSDMA_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC0_IB_CNTL 0x013a
+#define regSDMA_RLC0_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC0_IB_RPTR 0x013b
+#define regSDMA_RLC0_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC0_IB_OFFSET 0x013c
+#define regSDMA_RLC0_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC0_IB_BASE_LO 0x013d
+#define regSDMA_RLC0_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC0_IB_BASE_HI 0x013e
+#define regSDMA_RLC0_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC0_IB_SIZE 0x013f
+#define regSDMA_RLC0_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC0_SKIP_CNTL 0x0140
+#define regSDMA_RLC0_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC0_CONTEXT_STATUS 0x0141
+#define regSDMA_RLC0_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC0_DOORBELL 0x0142
+#define regSDMA_RLC0_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC0_STATUS 0x0158
+#define regSDMA_RLC0_STATUS_BASE_IDX 0
+#define regSDMA_RLC0_DOORBELL_LOG 0x0159
+#define regSDMA_RLC0_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC0_WATERMARK 0x015a
+#define regSDMA_RLC0_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC0_DOORBELL_OFFSET 0x015b
+#define regSDMA_RLC0_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC0_CSA_ADDR_LO 0x015c
+#define regSDMA_RLC0_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC0_CSA_ADDR_HI 0x015d
+#define regSDMA_RLC0_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC0_IB_SUB_REMAIN 0x015f
+#define regSDMA_RLC0_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC0_PREEMPT 0x0160
+#define regSDMA_RLC0_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC0_DUMMY_REG 0x0161
+#define regSDMA_RLC0_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC0_RB_WPTR_POLL_ADDR_HI 0x0162
+#define regSDMA_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC0_RB_WPTR_POLL_ADDR_LO 0x0163
+#define regSDMA_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC0_RB_AQL_CNTL 0x0164
+#define regSDMA_RLC0_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC0_MINOR_PTR_UPDATE 0x0165
+#define regSDMA_RLC0_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA0 0x0170
+#define regSDMA_RLC0_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA1 0x0171
+#define regSDMA_RLC0_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA2 0x0172
+#define regSDMA_RLC0_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA3 0x0173
+#define regSDMA_RLC0_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA4 0x0174
+#define regSDMA_RLC0_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA5 0x0175
+#define regSDMA_RLC0_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA6 0x0176
+#define regSDMA_RLC0_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA7 0x0177
+#define regSDMA_RLC0_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA8 0x0178
+#define regSDMA_RLC0_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA9 0x0179
+#define regSDMA_RLC0_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA10 0x017a
+#define regSDMA_RLC0_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_CNTL 0x017b
+#define regSDMA_RLC0_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC1_RB_CNTL 0x0188
+#define regSDMA_RLC1_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC1_RB_BASE 0x0189
+#define regSDMA_RLC1_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC1_RB_BASE_HI 0x018a
+#define regSDMA_RLC1_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC1_RB_RPTR 0x018b
+#define regSDMA_RLC1_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC1_RB_RPTR_HI 0x018c
+#define regSDMA_RLC1_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC1_RB_WPTR 0x018d
+#define regSDMA_RLC1_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC1_RB_WPTR_HI 0x018e
+#define regSDMA_RLC1_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC1_RB_WPTR_POLL_CNTL 0x018f
+#define regSDMA_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC1_RB_RPTR_ADDR_HI 0x0190
+#define regSDMA_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC1_RB_RPTR_ADDR_LO 0x0191
+#define regSDMA_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC1_IB_CNTL 0x0192
+#define regSDMA_RLC1_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC1_IB_RPTR 0x0193
+#define regSDMA_RLC1_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC1_IB_OFFSET 0x0194
+#define regSDMA_RLC1_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC1_IB_BASE_LO 0x0195
+#define regSDMA_RLC1_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC1_IB_BASE_HI 0x0196
+#define regSDMA_RLC1_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC1_IB_SIZE 0x0197
+#define regSDMA_RLC1_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC1_SKIP_CNTL 0x0198
+#define regSDMA_RLC1_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC1_CONTEXT_STATUS 0x0199
+#define regSDMA_RLC1_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC1_DOORBELL 0x019a
+#define regSDMA_RLC1_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC1_STATUS 0x01b0
+#define regSDMA_RLC1_STATUS_BASE_IDX 0
+#define regSDMA_RLC1_DOORBELL_LOG 0x01b1
+#define regSDMA_RLC1_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC1_WATERMARK 0x01b2
+#define regSDMA_RLC1_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC1_DOORBELL_OFFSET 0x01b3
+#define regSDMA_RLC1_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC1_CSA_ADDR_LO 0x01b4
+#define regSDMA_RLC1_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC1_CSA_ADDR_HI 0x01b5
+#define regSDMA_RLC1_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC1_IB_SUB_REMAIN 0x01b7
+#define regSDMA_RLC1_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC1_PREEMPT 0x01b8
+#define regSDMA_RLC1_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC1_DUMMY_REG 0x01b9
+#define regSDMA_RLC1_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC1_RB_WPTR_POLL_ADDR_HI 0x01ba
+#define regSDMA_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC1_RB_WPTR_POLL_ADDR_LO 0x01bb
+#define regSDMA_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC1_RB_AQL_CNTL 0x01bc
+#define regSDMA_RLC1_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC1_MINOR_PTR_UPDATE 0x01bd
+#define regSDMA_RLC1_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA0 0x01c8
+#define regSDMA_RLC1_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA1 0x01c9
+#define regSDMA_RLC1_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA2 0x01ca
+#define regSDMA_RLC1_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA3 0x01cb
+#define regSDMA_RLC1_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA4 0x01cc
+#define regSDMA_RLC1_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA5 0x01cd
+#define regSDMA_RLC1_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA6 0x01ce
+#define regSDMA_RLC1_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA7 0x01cf
+#define regSDMA_RLC1_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA8 0x01d0
+#define regSDMA_RLC1_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA9 0x01d1
+#define regSDMA_RLC1_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA10 0x01d2
+#define regSDMA_RLC1_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_CNTL 0x01d3
+#define regSDMA_RLC1_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC2_RB_CNTL 0x01e0
+#define regSDMA_RLC2_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC2_RB_BASE 0x01e1
+#define regSDMA_RLC2_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC2_RB_BASE_HI 0x01e2
+#define regSDMA_RLC2_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC2_RB_RPTR 0x01e3
+#define regSDMA_RLC2_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC2_RB_RPTR_HI 0x01e4
+#define regSDMA_RLC2_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC2_RB_WPTR 0x01e5
+#define regSDMA_RLC2_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC2_RB_WPTR_HI 0x01e6
+#define regSDMA_RLC2_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC2_RB_WPTR_POLL_CNTL 0x01e7
+#define regSDMA_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC2_RB_RPTR_ADDR_HI 0x01e8
+#define regSDMA_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC2_RB_RPTR_ADDR_LO 0x01e9
+#define regSDMA_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC2_IB_CNTL 0x01ea
+#define regSDMA_RLC2_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC2_IB_RPTR 0x01eb
+#define regSDMA_RLC2_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC2_IB_OFFSET 0x01ec
+#define regSDMA_RLC2_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC2_IB_BASE_LO 0x01ed
+#define regSDMA_RLC2_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC2_IB_BASE_HI 0x01ee
+#define regSDMA_RLC2_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC2_IB_SIZE 0x01ef
+#define regSDMA_RLC2_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC2_SKIP_CNTL 0x01f0
+#define regSDMA_RLC2_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC2_CONTEXT_STATUS 0x01f1
+#define regSDMA_RLC2_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC2_DOORBELL 0x01f2
+#define regSDMA_RLC2_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC2_STATUS 0x0208
+#define regSDMA_RLC2_STATUS_BASE_IDX 0
+#define regSDMA_RLC2_DOORBELL_LOG 0x0209
+#define regSDMA_RLC2_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC2_WATERMARK 0x020a
+#define regSDMA_RLC2_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC2_DOORBELL_OFFSET 0x020b
+#define regSDMA_RLC2_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC2_CSA_ADDR_LO 0x020c
+#define regSDMA_RLC2_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC2_CSA_ADDR_HI 0x020d
+#define regSDMA_RLC2_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC2_IB_SUB_REMAIN 0x020f
+#define regSDMA_RLC2_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC2_PREEMPT 0x0210
+#define regSDMA_RLC2_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC2_DUMMY_REG 0x0211
+#define regSDMA_RLC2_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC2_RB_WPTR_POLL_ADDR_HI 0x0212
+#define regSDMA_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC2_RB_WPTR_POLL_ADDR_LO 0x0213
+#define regSDMA_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC2_RB_AQL_CNTL 0x0214
+#define regSDMA_RLC2_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC2_MINOR_PTR_UPDATE 0x0215
+#define regSDMA_RLC2_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA0 0x0220
+#define regSDMA_RLC2_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA1 0x0221
+#define regSDMA_RLC2_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA2 0x0222
+#define regSDMA_RLC2_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA3 0x0223
+#define regSDMA_RLC2_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA4 0x0224
+#define regSDMA_RLC2_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA5 0x0225
+#define regSDMA_RLC2_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA6 0x0226
+#define regSDMA_RLC2_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA7 0x0227
+#define regSDMA_RLC2_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA8 0x0228
+#define regSDMA_RLC2_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA9 0x0229
+#define regSDMA_RLC2_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA10 0x022a
+#define regSDMA_RLC2_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_CNTL 0x022b
+#define regSDMA_RLC2_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC3_RB_CNTL 0x0238
+#define regSDMA_RLC3_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC3_RB_BASE 0x0239
+#define regSDMA_RLC3_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC3_RB_BASE_HI 0x023a
+#define regSDMA_RLC3_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC3_RB_RPTR 0x023b
+#define regSDMA_RLC3_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC3_RB_RPTR_HI 0x023c
+#define regSDMA_RLC3_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC3_RB_WPTR 0x023d
+#define regSDMA_RLC3_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC3_RB_WPTR_HI 0x023e
+#define regSDMA_RLC3_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC3_RB_WPTR_POLL_CNTL 0x023f
+#define regSDMA_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC3_RB_RPTR_ADDR_HI 0x0240
+#define regSDMA_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC3_RB_RPTR_ADDR_LO 0x0241
+#define regSDMA_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC3_IB_CNTL 0x0242
+#define regSDMA_RLC3_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC3_IB_RPTR 0x0243
+#define regSDMA_RLC3_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC3_IB_OFFSET 0x0244
+#define regSDMA_RLC3_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC3_IB_BASE_LO 0x0245
+#define regSDMA_RLC3_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC3_IB_BASE_HI 0x0246
+#define regSDMA_RLC3_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC3_IB_SIZE 0x0247
+#define regSDMA_RLC3_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC3_SKIP_CNTL 0x0248
+#define regSDMA_RLC3_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC3_CONTEXT_STATUS 0x0249
+#define regSDMA_RLC3_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC3_DOORBELL 0x024a
+#define regSDMA_RLC3_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC3_STATUS 0x0260
+#define regSDMA_RLC3_STATUS_BASE_IDX 0
+#define regSDMA_RLC3_DOORBELL_LOG 0x0261
+#define regSDMA_RLC3_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC3_WATERMARK 0x0262
+#define regSDMA_RLC3_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC3_DOORBELL_OFFSET 0x0263
+#define regSDMA_RLC3_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC3_CSA_ADDR_LO 0x0264
+#define regSDMA_RLC3_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC3_CSA_ADDR_HI 0x0265
+#define regSDMA_RLC3_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC3_IB_SUB_REMAIN 0x0267
+#define regSDMA_RLC3_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC3_PREEMPT 0x0268
+#define regSDMA_RLC3_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC3_DUMMY_REG 0x0269
+#define regSDMA_RLC3_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC3_RB_WPTR_POLL_ADDR_HI 0x026a
+#define regSDMA_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC3_RB_WPTR_POLL_ADDR_LO 0x026b
+#define regSDMA_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC3_RB_AQL_CNTL 0x026c
+#define regSDMA_RLC3_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC3_MINOR_PTR_UPDATE 0x026d
+#define regSDMA_RLC3_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA0 0x0278
+#define regSDMA_RLC3_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA1 0x0279
+#define regSDMA_RLC3_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA2 0x027a
+#define regSDMA_RLC3_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA3 0x027b
+#define regSDMA_RLC3_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA4 0x027c
+#define regSDMA_RLC3_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA5 0x027d
+#define regSDMA_RLC3_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA6 0x027e
+#define regSDMA_RLC3_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA7 0x027f
+#define regSDMA_RLC3_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA8 0x0280
+#define regSDMA_RLC3_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA9 0x0281
+#define regSDMA_RLC3_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA10 0x0282
+#define regSDMA_RLC3_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_CNTL 0x0283
+#define regSDMA_RLC3_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC4_RB_CNTL 0x0290
+#define regSDMA_RLC4_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC4_RB_BASE 0x0291
+#define regSDMA_RLC4_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC4_RB_BASE_HI 0x0292
+#define regSDMA_RLC4_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC4_RB_RPTR 0x0293
+#define regSDMA_RLC4_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC4_RB_RPTR_HI 0x0294
+#define regSDMA_RLC4_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC4_RB_WPTR 0x0295
+#define regSDMA_RLC4_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC4_RB_WPTR_HI 0x0296
+#define regSDMA_RLC4_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC4_RB_WPTR_POLL_CNTL 0x0297
+#define regSDMA_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC4_RB_RPTR_ADDR_HI 0x0298
+#define regSDMA_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC4_RB_RPTR_ADDR_LO 0x0299
+#define regSDMA_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC4_IB_CNTL 0x029a
+#define regSDMA_RLC4_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC4_IB_RPTR 0x029b
+#define regSDMA_RLC4_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC4_IB_OFFSET 0x029c
+#define regSDMA_RLC4_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC4_IB_BASE_LO 0x029d
+#define regSDMA_RLC4_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC4_IB_BASE_HI 0x029e
+#define regSDMA_RLC4_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC4_IB_SIZE 0x029f
+#define regSDMA_RLC4_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC4_SKIP_CNTL 0x02a0
+#define regSDMA_RLC4_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC4_CONTEXT_STATUS 0x02a1
+#define regSDMA_RLC4_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC4_DOORBELL 0x02a2
+#define regSDMA_RLC4_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC4_STATUS 0x02b8
+#define regSDMA_RLC4_STATUS_BASE_IDX 0
+#define regSDMA_RLC4_DOORBELL_LOG 0x02b9
+#define regSDMA_RLC4_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC4_WATERMARK 0x02ba
+#define regSDMA_RLC4_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC4_DOORBELL_OFFSET 0x02bb
+#define regSDMA_RLC4_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC4_CSA_ADDR_LO 0x02bc
+#define regSDMA_RLC4_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC4_CSA_ADDR_HI 0x02bd
+#define regSDMA_RLC4_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC4_IB_SUB_REMAIN 0x02bf
+#define regSDMA_RLC4_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC4_PREEMPT 0x02c0
+#define regSDMA_RLC4_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC4_DUMMY_REG 0x02c1
+#define regSDMA_RLC4_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC4_RB_WPTR_POLL_ADDR_HI 0x02c2
+#define regSDMA_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC4_RB_WPTR_POLL_ADDR_LO 0x02c3
+#define regSDMA_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC4_RB_AQL_CNTL 0x02c4
+#define regSDMA_RLC4_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC4_MINOR_PTR_UPDATE 0x02c5
+#define regSDMA_RLC4_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA0 0x02d0
+#define regSDMA_RLC4_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA1 0x02d1
+#define regSDMA_RLC4_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA2 0x02d2
+#define regSDMA_RLC4_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA3 0x02d3
+#define regSDMA_RLC4_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA4 0x02d4
+#define regSDMA_RLC4_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA5 0x02d5
+#define regSDMA_RLC4_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA6 0x02d6
+#define regSDMA_RLC4_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA7 0x02d7
+#define regSDMA_RLC4_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA8 0x02d8
+#define regSDMA_RLC4_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA9 0x02d9
+#define regSDMA_RLC4_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA10 0x02da
+#define regSDMA_RLC4_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_CNTL 0x02db
+#define regSDMA_RLC4_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC5_RB_CNTL 0x02e8
+#define regSDMA_RLC5_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC5_RB_BASE 0x02e9
+#define regSDMA_RLC5_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC5_RB_BASE_HI 0x02ea
+#define regSDMA_RLC5_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC5_RB_RPTR 0x02eb
+#define regSDMA_RLC5_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC5_RB_RPTR_HI 0x02ec
+#define regSDMA_RLC5_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC5_RB_WPTR 0x02ed
+#define regSDMA_RLC5_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC5_RB_WPTR_HI 0x02ee
+#define regSDMA_RLC5_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC5_RB_WPTR_POLL_CNTL 0x02ef
+#define regSDMA_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC5_RB_RPTR_ADDR_HI 0x02f0
+#define regSDMA_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC5_RB_RPTR_ADDR_LO 0x02f1
+#define regSDMA_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC5_IB_CNTL 0x02f2
+#define regSDMA_RLC5_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC5_IB_RPTR 0x02f3
+#define regSDMA_RLC5_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC5_IB_OFFSET 0x02f4
+#define regSDMA_RLC5_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC5_IB_BASE_LO 0x02f5
+#define regSDMA_RLC5_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC5_IB_BASE_HI 0x02f6
+#define regSDMA_RLC5_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC5_IB_SIZE 0x02f7
+#define regSDMA_RLC5_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC5_SKIP_CNTL 0x02f8
+#define regSDMA_RLC5_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC5_CONTEXT_STATUS 0x02f9
+#define regSDMA_RLC5_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC5_DOORBELL 0x02fa
+#define regSDMA_RLC5_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC5_STATUS 0x0310
+#define regSDMA_RLC5_STATUS_BASE_IDX 0
+#define regSDMA_RLC5_DOORBELL_LOG 0x0311
+#define regSDMA_RLC5_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC5_WATERMARK 0x0312
+#define regSDMA_RLC5_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC5_DOORBELL_OFFSET 0x0313
+#define regSDMA_RLC5_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC5_CSA_ADDR_LO 0x0314
+#define regSDMA_RLC5_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC5_CSA_ADDR_HI 0x0315
+#define regSDMA_RLC5_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC5_IB_SUB_REMAIN 0x0317
+#define regSDMA_RLC5_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC5_PREEMPT 0x0318
+#define regSDMA_RLC5_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC5_DUMMY_REG 0x0319
+#define regSDMA_RLC5_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC5_RB_WPTR_POLL_ADDR_HI 0x031a
+#define regSDMA_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC5_RB_WPTR_POLL_ADDR_LO 0x031b
+#define regSDMA_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC5_RB_AQL_CNTL 0x031c
+#define regSDMA_RLC5_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC5_MINOR_PTR_UPDATE 0x031d
+#define regSDMA_RLC5_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA0 0x0328
+#define regSDMA_RLC5_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA1 0x0329
+#define regSDMA_RLC5_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA2 0x032a
+#define regSDMA_RLC5_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA3 0x032b
+#define regSDMA_RLC5_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA4 0x032c
+#define regSDMA_RLC5_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA5 0x032d
+#define regSDMA_RLC5_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA6 0x032e
+#define regSDMA_RLC5_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA7 0x032f
+#define regSDMA_RLC5_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA8 0x0330
+#define regSDMA_RLC5_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA9 0x0331
+#define regSDMA_RLC5_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA10 0x0332
+#define regSDMA_RLC5_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_CNTL 0x0333
+#define regSDMA_RLC5_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC6_RB_CNTL 0x0340
+#define regSDMA_RLC6_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC6_RB_BASE 0x0341
+#define regSDMA_RLC6_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC6_RB_BASE_HI 0x0342
+#define regSDMA_RLC6_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC6_RB_RPTR 0x0343
+#define regSDMA_RLC6_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC6_RB_RPTR_HI 0x0344
+#define regSDMA_RLC6_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC6_RB_WPTR 0x0345
+#define regSDMA_RLC6_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC6_RB_WPTR_HI 0x0346
+#define regSDMA_RLC6_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC6_RB_WPTR_POLL_CNTL 0x0347
+#define regSDMA_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC6_RB_RPTR_ADDR_HI 0x0348
+#define regSDMA_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC6_RB_RPTR_ADDR_LO 0x0349
+#define regSDMA_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC6_IB_CNTL 0x034a
+#define regSDMA_RLC6_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC6_IB_RPTR 0x034b
+#define regSDMA_RLC6_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC6_IB_OFFSET 0x034c
+#define regSDMA_RLC6_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC6_IB_BASE_LO 0x034d
+#define regSDMA_RLC6_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC6_IB_BASE_HI 0x034e
+#define regSDMA_RLC6_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC6_IB_SIZE 0x034f
+#define regSDMA_RLC6_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC6_SKIP_CNTL 0x0350
+#define regSDMA_RLC6_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC6_CONTEXT_STATUS 0x0351
+#define regSDMA_RLC6_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC6_DOORBELL 0x0352
+#define regSDMA_RLC6_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC6_STATUS 0x0368
+#define regSDMA_RLC6_STATUS_BASE_IDX 0
+#define regSDMA_RLC6_DOORBELL_LOG 0x0369
+#define regSDMA_RLC6_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC6_WATERMARK 0x036a
+#define regSDMA_RLC6_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC6_DOORBELL_OFFSET 0x036b
+#define regSDMA_RLC6_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC6_CSA_ADDR_LO 0x036c
+#define regSDMA_RLC6_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC6_CSA_ADDR_HI 0x036d
+#define regSDMA_RLC6_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC6_IB_SUB_REMAIN 0x036f
+#define regSDMA_RLC6_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC6_PREEMPT 0x0370
+#define regSDMA_RLC6_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC6_DUMMY_REG 0x0371
+#define regSDMA_RLC6_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC6_RB_WPTR_POLL_ADDR_HI 0x0372
+#define regSDMA_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC6_RB_WPTR_POLL_ADDR_LO 0x0373
+#define regSDMA_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC6_RB_AQL_CNTL 0x0374
+#define regSDMA_RLC6_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC6_MINOR_PTR_UPDATE 0x0375
+#define regSDMA_RLC6_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA0 0x0380
+#define regSDMA_RLC6_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA1 0x0381
+#define regSDMA_RLC6_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA2 0x0382
+#define regSDMA_RLC6_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA3 0x0383
+#define regSDMA_RLC6_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA4 0x0384
+#define regSDMA_RLC6_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA5 0x0385
+#define regSDMA_RLC6_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA6 0x0386
+#define regSDMA_RLC6_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA7 0x0387
+#define regSDMA_RLC6_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA8 0x0388
+#define regSDMA_RLC6_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA9 0x0389
+#define regSDMA_RLC6_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA10 0x038a
+#define regSDMA_RLC6_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_CNTL 0x038b
+#define regSDMA_RLC6_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC7_RB_CNTL 0x0398
+#define regSDMA_RLC7_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC7_RB_BASE 0x0399
+#define regSDMA_RLC7_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC7_RB_BASE_HI 0x039a
+#define regSDMA_RLC7_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC7_RB_RPTR 0x039b
+#define regSDMA_RLC7_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC7_RB_RPTR_HI 0x039c
+#define regSDMA_RLC7_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC7_RB_WPTR 0x039d
+#define regSDMA_RLC7_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC7_RB_WPTR_HI 0x039e
+#define regSDMA_RLC7_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC7_RB_WPTR_POLL_CNTL 0x039f
+#define regSDMA_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC7_RB_RPTR_ADDR_HI 0x03a0
+#define regSDMA_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC7_RB_RPTR_ADDR_LO 0x03a1
+#define regSDMA_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC7_IB_CNTL 0x03a2
+#define regSDMA_RLC7_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC7_IB_RPTR 0x03a3
+#define regSDMA_RLC7_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC7_IB_OFFSET 0x03a4
+#define regSDMA_RLC7_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC7_IB_BASE_LO 0x03a5
+#define regSDMA_RLC7_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC7_IB_BASE_HI 0x03a6
+#define regSDMA_RLC7_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC7_IB_SIZE 0x03a7
+#define regSDMA_RLC7_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC7_SKIP_CNTL 0x03a8
+#define regSDMA_RLC7_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC7_CONTEXT_STATUS 0x03a9
+#define regSDMA_RLC7_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC7_DOORBELL 0x03aa
+#define regSDMA_RLC7_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC7_STATUS 0x03c0
+#define regSDMA_RLC7_STATUS_BASE_IDX 0
+#define regSDMA_RLC7_DOORBELL_LOG 0x03c1
+#define regSDMA_RLC7_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC7_WATERMARK 0x03c2
+#define regSDMA_RLC7_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC7_DOORBELL_OFFSET 0x03c3
+#define regSDMA_RLC7_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC7_CSA_ADDR_LO 0x03c4
+#define regSDMA_RLC7_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC7_CSA_ADDR_HI 0x03c5
+#define regSDMA_RLC7_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC7_IB_SUB_REMAIN 0x03c7
+#define regSDMA_RLC7_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC7_PREEMPT 0x03c8
+#define regSDMA_RLC7_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC7_DUMMY_REG 0x03c9
+#define regSDMA_RLC7_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC7_RB_WPTR_POLL_ADDR_HI 0x03ca
+#define regSDMA_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC7_RB_WPTR_POLL_ADDR_LO 0x03cb
+#define regSDMA_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC7_RB_AQL_CNTL 0x03cc
+#define regSDMA_RLC7_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC7_MINOR_PTR_UPDATE 0x03cd
+#define regSDMA_RLC7_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA0 0x03d8
+#define regSDMA_RLC7_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA1 0x03d9
+#define regSDMA_RLC7_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA2 0x03da
+#define regSDMA_RLC7_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA3 0x03db
+#define regSDMA_RLC7_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA4 0x03dc
+#define regSDMA_RLC7_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA5 0x03dd
+#define regSDMA_RLC7_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA6 0x03de
+#define regSDMA_RLC7_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA7 0x03df
+#define regSDMA_RLC7_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA8 0x03e0
+#define regSDMA_RLC7_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA9 0x03e1
+#define regSDMA_RLC7_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA10 0x03e2
+#define regSDMA_RLC7_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_CNTL 0x03e3
+#define regSDMA_RLC7_MIDCMD_CNTL_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_sh_mask.h
new file mode 100644
index 000000000000..e46cb3339355
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_sh_mask.h
@@ -0,0 +1,3276 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _sdma_4_4_2_SH_MASK_HEADER
+#define _sdma_4_4_2_SH_MASK_HEADER
+
+
+// addressBlock: aid_sdma_insts_sdma0_sdmadec
+//SDMA_UCODE_ADDR
+#define SDMA_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA_UCODE_ADDR__VALUE_MASK 0x00003FFFL
+//SDMA_UCODE_DATA
+#define SDMA_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA_F32_CNTL
+#define SDMA_F32_CNTL__HALT__SHIFT 0x0
+#define SDMA_F32_CNTL__STEP__SHIFT 0x1
+#define SDMA_F32_CNTL__DBG_SELECT_BITS__SHIFT 0x2
+#define SDMA_F32_CNTL__RESET__SHIFT 0x8
+#define SDMA_F32_CNTL__CHECKSUM_CLR__SHIFT 0x9
+#define SDMA_F32_CNTL__HALT_MASK 0x00000001L
+#define SDMA_F32_CNTL__STEP_MASK 0x00000002L
+#define SDMA_F32_CNTL__DBG_SELECT_BITS_MASK 0x000000FCL
+#define SDMA_F32_CNTL__RESET_MASK 0x00000100L
+#define SDMA_F32_CNTL__CHECKSUM_CLR_MASK 0x00000200L
+//SDMA_MMHUB_CNTL
+#define SDMA_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
+#define SDMA_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
+//SDMA_MMHUB_TRUSTLVL
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG0__SHIFT 0x0
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG1__SHIFT 0x4
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG2__SHIFT 0x8
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG3__SHIFT 0xc
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG4__SHIFT 0x10
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG5__SHIFT 0x14
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG6__SHIFT 0x18
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG7__SHIFT 0x1c
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG0_MASK 0x0000000FL
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG1_MASK 0x000000F0L
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG2_MASK 0x00000F00L
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG3_MASK 0x0000F000L
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG4_MASK 0x000F0000L
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG5_MASK 0x00F00000L
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG6_MASK 0x0F000000L
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG7_MASK 0xF0000000L
+//SDMA_VM_CNTL
+#define SDMA_VM_CNTL__CMD__SHIFT 0x0
+#define SDMA_VM_CNTL__CMD_MASK 0x0000000FL
+//SDMA_VM_CTX_LO
+#define SDMA_VM_CTX_LO__ADDR__SHIFT 0x2
+#define SDMA_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_VM_CTX_HI
+#define SDMA_VM_CTX_HI__ADDR__SHIFT 0x0
+#define SDMA_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_ACTIVE_FCN_ID
+#define SDMA_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SDMA_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define SDMA_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SDMA_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SDMA_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define SDMA_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SDMA_VM_CTX_CNTL
+#define SDMA_VM_CTX_CNTL__PRIV__SHIFT 0x0
+#define SDMA_VM_CTX_CNTL__VMID__SHIFT 0x4
+#define SDMA_VM_CTX_CNTL__PRIV_MASK 0x00000001L
+#define SDMA_VM_CTX_CNTL__VMID_MASK 0x000000F0L
+//SDMA_VIRT_RESET_REQ
+#define SDMA_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SDMA_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SDMA_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SDMA_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SDMA_VF_ENABLE
+#define SDMA_VF_ENABLE__VF_ENABLE__SHIFT 0x0
+#define SDMA_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
+//SDMA_CONTEXT_REG_TYPE0
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_CNTL__SHIFT 0x0
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_BASE__SHIFT 0x1
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_BASE_HI__SHIFT 0x2
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR__SHIFT 0x3
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR_HI__SHIFT 0x4
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_WPTR__SHIFT 0x5
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_WPTR_HI__SHIFT 0x6
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_CNTL__SHIFT 0xa
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_RPTR__SHIFT 0xb
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_OFFSET__SHIFT 0xc
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_BASE_LO__SHIFT 0xd
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_BASE_HI__SHIFT 0xe
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_SIZE__SHIFT 0xf
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_SKIP_CNTL__SHIFT 0x10
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_CONTEXT_STATUS__SHIFT 0x11
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_DOORBELL__SHIFT 0x12
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_CONTEXT_CNTL__SHIFT 0x13
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_CNTL_MASK 0x00000001L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_BASE_MASK 0x00000002L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_BASE_HI_MASK 0x00000004L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR_MASK 0x00000008L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR_HI_MASK 0x00000010L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_WPTR_MASK 0x00000020L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_WPTR_HI_MASK 0x00000040L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_CNTL_MASK 0x00000400L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_RPTR_MASK 0x00000800L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_OFFSET_MASK 0x00001000L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_BASE_LO_MASK 0x00002000L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_BASE_HI_MASK 0x00004000L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_SIZE_MASK 0x00008000L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_SKIP_CNTL_MASK 0x00010000L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_CONTEXT_STATUS_MASK 0x00020000L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_DOORBELL_MASK 0x00040000L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_CONTEXT_CNTL_MASK 0x00080000L
+//SDMA_CONTEXT_REG_TYPE1
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_STATUS__SHIFT 0x8
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_DOORBELL_LOG__SHIFT 0x9
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_WATERMARK__SHIFT 0xa
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_DOORBELL_OFFSET__SHIFT 0xb
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_CSA_ADDR_LO__SHIFT 0xc
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_CSA_ADDR_HI__SHIFT 0xd
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_IB_SUB_REMAIN__SHIFT 0xf
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_PREEMPT__SHIFT 0x10
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_DUMMY_REG__SHIFT 0x11
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_RB_AQL_CNTL__SHIFT 0x14
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_MINOR_PTR_UPDATE__SHIFT 0x15
+#define SDMA_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_STATUS_MASK 0x00000100L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_DOORBELL_LOG_MASK 0x00000200L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_WATERMARK_MASK 0x00000400L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_DOORBELL_OFFSET_MASK 0x00000800L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_CSA_ADDR_LO_MASK 0x00001000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_CSA_ADDR_HI_MASK 0x00002000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_IB_SUB_REMAIN_MASK 0x00008000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_PREEMPT_MASK 0x00010000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_DUMMY_REG_MASK 0x00020000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_RB_AQL_CNTL_MASK 0x00100000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L
+#define SDMA_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L
+//SDMA_CONTEXT_REG_TYPE2
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA0__SHIFT 0x0
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA1__SHIFT 0x1
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA2__SHIFT 0x2
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA3__SHIFT 0x3
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA4__SHIFT 0x4
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA5__SHIFT 0x5
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA6__SHIFT 0x6
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA7__SHIFT 0x7
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA8__SHIFT 0x8
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA9__SHIFT 0x9
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA10__SHIFT 0xa
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_CNTL__SHIFT 0xb
+#define SDMA_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xe
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA0_MASK 0x00000001L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA1_MASK 0x00000002L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA2_MASK 0x00000004L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA3_MASK 0x00000008L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA4_MASK 0x00000010L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA5_MASK 0x00000020L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA6_MASK 0x00000040L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA7_MASK 0x00000080L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA8_MASK 0x00000100L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA9_MASK 0x00000200L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA10_MASK 0x00000400L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_CNTL_MASK 0x00000800L
+#define SDMA_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFC000L
+//SDMA_CONTEXT_REG_TYPE3
+#define SDMA_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0
+#define SDMA_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL
+//SDMA_PUB_REG_TYPE0
+#define SDMA_PUB_REG_TYPE0__SDMA_UCODE_ADDR__SHIFT 0x0
+#define SDMA_PUB_REG_TYPE0__SDMA_UCODE_DATA__SHIFT 0x1
+#define SDMA_PUB_REG_TYPE0__SDMA_F32_CNTL__SHIFT 0x2
+#define SDMA_PUB_REG_TYPE0__SDMA_MMHUB_CNTL__SHIFT 0x5
+#define SDMA_PUB_REG_TYPE0__SDMA_MMHUB_TRUSTLVL__SHIFT 0x6
+#define SDMA_PUB_REG_TYPE0__RESERVED_14_10__SHIFT 0xa
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CNTL__SHIFT 0x10
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CTX_LO__SHIFT 0x11
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CTX_HI__SHIFT 0x12
+#define SDMA_PUB_REG_TYPE0__SDMA_ACTIVE_FCN_ID__SHIFT 0x13
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CTX_CNTL__SHIFT 0x14
+#define SDMA_PUB_REG_TYPE0__SDMA_VIRT_RESET_REQ__SHIFT 0x15
+#define SDMA_PUB_REG_TYPE0__SDMA_VF_ENABLE__SHIFT 0x16
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE0__SHIFT 0x17
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE1__SHIFT 0x18
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE2__SHIFT 0x19
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE3__SHIFT 0x1a
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE0__SHIFT 0x1b
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE1__SHIFT 0x1c
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE2__SHIFT 0x1d
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE3__SHIFT 0x1e
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_GROUP_BOUNDARY__SHIFT 0x1f
+#define SDMA_PUB_REG_TYPE0__SDMA_UCODE_ADDR_MASK 0x00000001L
+#define SDMA_PUB_REG_TYPE0__SDMA_UCODE_DATA_MASK 0x00000002L
+#define SDMA_PUB_REG_TYPE0__SDMA_F32_CNTL_MASK 0x00000004L
+#define SDMA_PUB_REG_TYPE0__SDMA_MMHUB_CNTL_MASK 0x00000020L
+#define SDMA_PUB_REG_TYPE0__SDMA_MMHUB_TRUSTLVL_MASK 0x00000040L
+#define SDMA_PUB_REG_TYPE0__RESERVED_14_10_MASK 0x00007C00L
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CNTL_MASK 0x00010000L
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CTX_LO_MASK 0x00020000L
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CTX_HI_MASK 0x00040000L
+#define SDMA_PUB_REG_TYPE0__SDMA_ACTIVE_FCN_ID_MASK 0x00080000L
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CTX_CNTL_MASK 0x00100000L
+#define SDMA_PUB_REG_TYPE0__SDMA_VIRT_RESET_REQ_MASK 0x00200000L
+#define SDMA_PUB_REG_TYPE0__SDMA_VF_ENABLE_MASK 0x00400000L
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE0_MASK 0x00800000L
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE1_MASK 0x01000000L
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE2_MASK 0x02000000L
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE3_MASK 0x04000000L
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE0_MASK 0x08000000L
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE1_MASK 0x10000000L
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE2_MASK 0x20000000L
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE3_MASK 0x40000000L
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_GROUP_BOUNDARY_MASK 0x80000000L
+//SDMA_PUB_REG_TYPE1
+#define SDMA_PUB_REG_TYPE1__SDMA_RB_RPTR_FETCH_HI__SHIFT 0x0
+#define SDMA_PUB_REG_TYPE1__SDMA_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1
+#define SDMA_PUB_REG_TYPE1__SDMA_RB_RPTR_FETCH__SHIFT 0x2
+#define SDMA_PUB_REG_TYPE1__SDMA_IB_OFFSET_FETCH__SHIFT 0x3
+#define SDMA_PUB_REG_TYPE1__SDMA_PROGRAM__SHIFT 0x4
+#define SDMA_PUB_REG_TYPE1__SDMA_STATUS_REG__SHIFT 0x5
+#define SDMA_PUB_REG_TYPE1__SDMA_STATUS1_REG__SHIFT 0x6
+#define SDMA_PUB_REG_TYPE1__SDMA_RD_BURST_CNTL__SHIFT 0x7
+#define SDMA_PUB_REG_TYPE1__SDMA_HBM_PAGE_CONFIG__SHIFT 0x8
+#define SDMA_PUB_REG_TYPE1__SDMA_UCODE_CHECKSUM__SHIFT 0x9
+#define SDMA_PUB_REG_TYPE1__RESERVED_10_10__SHIFT 0xa
+#define SDMA_PUB_REG_TYPE1__SDMA_FREEZE__SHIFT 0xb
+#define SDMA_PUB_REG_TYPE1__SDMA_PHASE0_QUANTUM__SHIFT 0xc
+#define SDMA_PUB_REG_TYPE1__SDMA_PHASE1_QUANTUM__SHIFT 0xd
+#define SDMA_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe
+#define SDMA_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf
+#define SDMA_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10
+#define SDMA_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11
+#define SDMA_PUB_REG_TYPE1__CC_SDMA_EDC_CONFIG__SHIFT 0x12
+#define SDMA_PUB_REG_TYPE1__SDMA_BA_THRESHOLD__SHIFT 0x13
+#define SDMA_PUB_REG_TYPE1__SDMA_ID__SHIFT 0x14
+#define SDMA_PUB_REG_TYPE1__SDMA_VERSION__SHIFT 0x15
+#define SDMA_PUB_REG_TYPE1__SDMA_EDC_COUNTER__SHIFT 0x16
+#define SDMA_PUB_REG_TYPE1__SDMA_EDC_COUNTER2__SHIFT 0x17
+#define SDMA_PUB_REG_TYPE1__SDMA_STATUS2_REG__SHIFT 0x18
+#define SDMA_PUB_REG_TYPE1__SDMA_ATOMIC_CNTL__SHIFT 0x19
+#define SDMA_PUB_REG_TYPE1__SDMA_ATOMIC_PREOP_LO__SHIFT 0x1a
+#define SDMA_PUB_REG_TYPE1__SDMA_ATOMIC_PREOP_HI__SHIFT 0x1b
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_CNTL__SHIFT 0x1c
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_WATERMK__SHIFT 0x1d
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_RD_STATUS__SHIFT 0x1e
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_WR_STATUS__SHIFT 0x1f
+#define SDMA_PUB_REG_TYPE1__SDMA_RB_RPTR_FETCH_HI_MASK 0x00000001L
+#define SDMA_PUB_REG_TYPE1__SDMA_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L
+#define SDMA_PUB_REG_TYPE1__SDMA_RB_RPTR_FETCH_MASK 0x00000004L
+#define SDMA_PUB_REG_TYPE1__SDMA_IB_OFFSET_FETCH_MASK 0x00000008L
+#define SDMA_PUB_REG_TYPE1__SDMA_PROGRAM_MASK 0x00000010L
+#define SDMA_PUB_REG_TYPE1__SDMA_STATUS_REG_MASK 0x00000020L
+#define SDMA_PUB_REG_TYPE1__SDMA_STATUS1_REG_MASK 0x00000040L
+#define SDMA_PUB_REG_TYPE1__SDMA_RD_BURST_CNTL_MASK 0x00000080L
+#define SDMA_PUB_REG_TYPE1__SDMA_HBM_PAGE_CONFIG_MASK 0x00000100L
+#define SDMA_PUB_REG_TYPE1__SDMA_UCODE_CHECKSUM_MASK 0x00000200L
+#define SDMA_PUB_REG_TYPE1__RESERVED_10_10_MASK 0x00000400L
+#define SDMA_PUB_REG_TYPE1__SDMA_FREEZE_MASK 0x00000800L
+#define SDMA_PUB_REG_TYPE1__SDMA_PHASE0_QUANTUM_MASK 0x00001000L
+#define SDMA_PUB_REG_TYPE1__SDMA_PHASE1_QUANTUM_MASK 0x00002000L
+#define SDMA_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L
+#define SDMA_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L
+#define SDMA_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L
+#define SDMA_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L
+#define SDMA_PUB_REG_TYPE1__CC_SDMA_EDC_CONFIG_MASK 0x00040000L
+#define SDMA_PUB_REG_TYPE1__SDMA_BA_THRESHOLD_MASK 0x00080000L
+#define SDMA_PUB_REG_TYPE1__SDMA_ID_MASK 0x00100000L
+#define SDMA_PUB_REG_TYPE1__SDMA_VERSION_MASK 0x00200000L
+#define SDMA_PUB_REG_TYPE1__SDMA_EDC_COUNTER_MASK 0x00400000L
+#define SDMA_PUB_REG_TYPE1__SDMA_EDC_COUNTER2_MASK 0x00800000L
+#define SDMA_PUB_REG_TYPE1__SDMA_STATUS2_REG_MASK 0x01000000L
+#define SDMA_PUB_REG_TYPE1__SDMA_ATOMIC_CNTL_MASK 0x02000000L
+#define SDMA_PUB_REG_TYPE1__SDMA_ATOMIC_PREOP_LO_MASK 0x04000000L
+#define SDMA_PUB_REG_TYPE1__SDMA_ATOMIC_PREOP_HI_MASK 0x08000000L
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_CNTL_MASK 0x10000000L
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_WATERMK_MASK 0x20000000L
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_RD_STATUS_MASK 0x40000000L
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_WR_STATUS_MASK 0x80000000L
+//SDMA_PUB_REG_TYPE2
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_INV0__SHIFT 0x0
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_INV1__SHIFT 0x1
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_INV2__SHIFT 0x2
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_RD_XNACK0__SHIFT 0x3
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_RD_XNACK1__SHIFT 0x4
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_WR_XNACK0__SHIFT 0x5
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_WR_XNACK1__SHIFT 0x6
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_TIMEOUT__SHIFT 0x7
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_PAGE__SHIFT 0x8
+#define SDMA_PUB_REG_TYPE2__SDMA_POWER_CNTL_IDLE__SHIFT 0x9
+#define SDMA_PUB_REG_TYPE2__SDMA_RELAX_ORDERING_LUT__SHIFT 0xa
+#define SDMA_PUB_REG_TYPE2__SDMA_CHICKEN_BITS_2__SHIFT 0xb
+#define SDMA_PUB_REG_TYPE2__SDMA_STATUS3_REG__SHIFT 0xc
+#define SDMA_PUB_REG_TYPE2__SDMA_PHYSICAL_ADDR_LO__SHIFT 0xd
+#define SDMA_PUB_REG_TYPE2__SDMA_PHYSICAL_ADDR_HI__SHIFT 0xe
+#define SDMA_PUB_REG_TYPE2__SDMA_PHASE2_QUANTUM__SHIFT 0xf
+#define SDMA_PUB_REG_TYPE2__SDMA_ERROR_LOG__SHIFT 0x10
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG0__SHIFT 0x11
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG1__SHIFT 0x12
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG2__SHIFT 0x13
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG3__SHIFT 0x14
+#define SDMA_PUB_REG_TYPE2__SDMA_F32_COUNTER__SHIFT 0x15
+#define SDMA_PUB_REG_TYPE2__RESERVED_22_22__SHIFT 0x16
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER0_CFG__SHIFT 0x17
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER1_CFG__SHIFT 0x18
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__SHIFT 0x19
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_MISC_CNTL__SHIFT 0x1a
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER_LO__SHIFT 0x1b
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER_HI__SHIFT 0x1c
+#define SDMA_PUB_REG_TYPE2__SDMA_CRD_CNTL__SHIFT 0x1d
+#define SDMA_PUB_REG_TYPE2__SDMA_GPU_IOV_VIOLATION_LOG__SHIFT 0x1e
+#define SDMA_PUB_REG_TYPE2__SDMA_ULV_CNTL__SHIFT 0x1f
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_INV0_MASK 0x00000001L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_INV1_MASK 0x00000002L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_INV2_MASK 0x00000004L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_RD_XNACK0_MASK 0x00000008L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_RD_XNACK1_MASK 0x00000010L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_WR_XNACK0_MASK 0x00000020L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_WR_XNACK1_MASK 0x00000040L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_TIMEOUT_MASK 0x00000080L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_PAGE_MASK 0x00000100L
+#define SDMA_PUB_REG_TYPE2__SDMA_POWER_CNTL_IDLE_MASK 0x00000200L
+#define SDMA_PUB_REG_TYPE2__SDMA_RELAX_ORDERING_LUT_MASK 0x00000400L
+#define SDMA_PUB_REG_TYPE2__SDMA_CHICKEN_BITS_2_MASK 0x00000800L
+#define SDMA_PUB_REG_TYPE2__SDMA_STATUS3_REG_MASK 0x00001000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PHYSICAL_ADDR_LO_MASK 0x00002000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PHYSICAL_ADDR_HI_MASK 0x00004000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PHASE2_QUANTUM_MASK 0x00008000L
+#define SDMA_PUB_REG_TYPE2__SDMA_ERROR_LOG_MASK 0x00010000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG0_MASK 0x00020000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG1_MASK 0x00040000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG2_MASK 0x00080000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG3_MASK 0x00100000L
+#define SDMA_PUB_REG_TYPE2__SDMA_F32_COUNTER_MASK 0x00200000L
+#define SDMA_PUB_REG_TYPE2__RESERVED_22_22_MASK 0x00400000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER0_CFG_MASK 0x00800000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER1_CFG_MASK 0x01000000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL_MASK 0x02000000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_MISC_CNTL_MASK 0x04000000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER_LO_MASK 0x08000000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER_HI_MASK 0x10000000L
+#define SDMA_PUB_REG_TYPE2__SDMA_CRD_CNTL_MASK 0x20000000L
+#define SDMA_PUB_REG_TYPE2__SDMA_GPU_IOV_VIOLATION_LOG_MASK 0x40000000L
+#define SDMA_PUB_REG_TYPE2__SDMA_ULV_CNTL_MASK 0x80000000L
+//SDMA_PUB_REG_TYPE3
+#define SDMA_PUB_REG_TYPE3__SDMA_EA_DBIT_ADDR_DATA__SHIFT 0x0
+#define SDMA_PUB_REG_TYPE3__SDMA_EA_DBIT_ADDR_INDEX__SHIFT 0x1
+#define SDMA_PUB_REG_TYPE3__SDMA_GPU_IOV_VIOLATION_LOG2__SHIFT 0x2
+#define SDMA_PUB_REG_TYPE3__SDMA_STATUS4_REG__SHIFT 0x3
+#define SDMA_PUB_REG_TYPE3__SDMA_SCRATCH_RAM_DATA__SHIFT 0x4
+#define SDMA_PUB_REG_TYPE3__SDMA_SCRATCH_RAM_ADDR__SHIFT 0x5
+#define SDMA_PUB_REG_TYPE3__SDMA_CE_CTRL__SHIFT 0x6
+#define SDMA_PUB_REG_TYPE3__SDMA_RAS_STATUS__SHIFT 0x7
+#define SDMA_PUB_REG_TYPE3__SDMA_CLK_STATUS__SHIFT 0x8
+#define SDMA_PUB_REG_TYPE3__SDMA_POWER_CNTL__SHIFT 0xb
+#define SDMA_PUB_REG_TYPE3__SDMA_CLK_CTRL__SHIFT 0xc
+#define SDMA_PUB_REG_TYPE3__SDMA_CNTL__SHIFT 0xd
+#define SDMA_PUB_REG_TYPE3__SDMA_CHICKEN_BITS__SHIFT 0xe
+#define SDMA_PUB_REG_TYPE3__SDMA_GB_ADDR_CONFIG__SHIFT 0xf
+#define SDMA_PUB_REG_TYPE3__SDMA_GB_ADDR_CONFIG_READ__SHIFT 0x10
+#define SDMA_PUB_REG_TYPE3__RESERVED__SHIFT 0x13
+#define SDMA_PUB_REG_TYPE3__SDMA_EA_DBIT_ADDR_DATA_MASK 0x00000001L
+#define SDMA_PUB_REG_TYPE3__SDMA_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
+#define SDMA_PUB_REG_TYPE3__SDMA_GPU_IOV_VIOLATION_LOG2_MASK 0x00000004L
+#define SDMA_PUB_REG_TYPE3__SDMA_STATUS4_REG_MASK 0x00000008L
+#define SDMA_PUB_REG_TYPE3__SDMA_SCRATCH_RAM_DATA_MASK 0x00000010L
+#define SDMA_PUB_REG_TYPE3__SDMA_SCRATCH_RAM_ADDR_MASK 0x00000020L
+#define SDMA_PUB_REG_TYPE3__SDMA_CE_CTRL_MASK 0x00000040L
+#define SDMA_PUB_REG_TYPE3__SDMA_RAS_STATUS_MASK 0x00000080L
+#define SDMA_PUB_REG_TYPE3__SDMA_CLK_STATUS_MASK 0x00000100L
+#define SDMA_PUB_REG_TYPE3__SDMA_POWER_CNTL_MASK 0x00000800L
+#define SDMA_PUB_REG_TYPE3__SDMA_CLK_CTRL_MASK 0x00001000L
+#define SDMA_PUB_REG_TYPE3__SDMA_CNTL_MASK 0x00002000L
+#define SDMA_PUB_REG_TYPE3__SDMA_CHICKEN_BITS_MASK 0x00004000L
+#define SDMA_PUB_REG_TYPE3__SDMA_GB_ADDR_CONFIG_MASK 0x00008000L
+#define SDMA_PUB_REG_TYPE3__SDMA_GB_ADDR_CONFIG_READ_MASK 0x00010000L
+#define SDMA_PUB_REG_TYPE3__RESERVED_MASK 0xFFF80000L
+//SDMA_CONTEXT_GROUP_BOUNDARY
+#define SDMA_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0
+#define SDMA_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL
+//SDMA_RB_RPTR_FETCH_HI
+#define SDMA_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
+#define SDMA_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_SEM_WAIT_FAIL_TIMER_CNTL
+#define SDMA_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
+#define SDMA_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
+//SDMA_RB_RPTR_FETCH
+#define SDMA_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
+#define SDMA_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
+//SDMA_IB_OFFSET_FETCH
+#define SDMA_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
+#define SDMA_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
+//SDMA_PROGRAM
+#define SDMA_PROGRAM__STREAM__SHIFT 0x0
+#define SDMA_PROGRAM__STREAM_MASK 0xFFFFFFFFL
+//SDMA_STATUS_REG
+#define SDMA_STATUS_REG__IDLE__SHIFT 0x0
+#define SDMA_STATUS_REG__REG_IDLE__SHIFT 0x1
+#define SDMA_STATUS_REG__RB_EMPTY__SHIFT 0x2
+#define SDMA_STATUS_REG__RB_FULL__SHIFT 0x3
+#define SDMA_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
+#define SDMA_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
+#define SDMA_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
+#define SDMA_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
+#define SDMA_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
+#define SDMA_STATUS_REG__INSIDE_IB__SHIFT 0x9
+#define SDMA_STATUS_REG__EX_IDLE__SHIFT 0xa
+#define SDMA_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb
+#define SDMA_STATUS_REG__PACKET_READY__SHIFT 0xc
+#define SDMA_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
+#define SDMA_STATUS_REG__SRBM_IDLE__SHIFT 0xe
+#define SDMA_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
+#define SDMA_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
+#define SDMA_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
+#define SDMA_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
+#define SDMA_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
+#define SDMA_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
+#define SDMA_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
+#define SDMA_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
+#define SDMA_STATUS_REG__DRM_IDLE__SHIFT 0x17
+#define SDMA_STATUS_REG__DRM_MASK_FULL__SHIFT 0x18
+#define SDMA_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
+#define SDMA_STATUS_REG__SEM_IDLE__SHIFT 0x1a
+#define SDMA_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
+#define SDMA_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
+#define SDMA_STATUS_REG__INT_IDLE__SHIFT 0x1e
+#define SDMA_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
+#define SDMA_STATUS_REG__IDLE_MASK 0x00000001L
+#define SDMA_STATUS_REG__REG_IDLE_MASK 0x00000002L
+#define SDMA_STATUS_REG__RB_EMPTY_MASK 0x00000004L
+#define SDMA_STATUS_REG__RB_FULL_MASK 0x00000008L
+#define SDMA_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
+#define SDMA_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
+#define SDMA_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
+#define SDMA_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
+#define SDMA_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
+#define SDMA_STATUS_REG__INSIDE_IB_MASK 0x00000200L
+#define SDMA_STATUS_REG__EX_IDLE_MASK 0x00000400L
+#define SDMA_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L
+#define SDMA_STATUS_REG__PACKET_READY_MASK 0x00001000L
+#define SDMA_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
+#define SDMA_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
+#define SDMA_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
+#define SDMA_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
+#define SDMA_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
+#define SDMA_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
+#define SDMA_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
+#define SDMA_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
+#define SDMA_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
+#define SDMA_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
+#define SDMA_STATUS_REG__DRM_IDLE_MASK 0x00800000L
+#define SDMA_STATUS_REG__DRM_MASK_FULL_MASK 0x01000000L
+#define SDMA_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
+#define SDMA_STATUS_REG__SEM_IDLE_MASK 0x04000000L
+#define SDMA_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
+#define SDMA_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
+#define SDMA_STATUS_REG__INT_IDLE_MASK 0x40000000L
+#define SDMA_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
+//SDMA_STATUS1_REG
+#define SDMA_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
+#define SDMA_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
+#define SDMA_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
+#define SDMA_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
+#define SDMA_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
+#define SDMA_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
+#define SDMA_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
+#define SDMA_STATUS1_REG__CE_DRM_IDLE__SHIFT 0x7
+#define SDMA_STATUS1_REG__CE_DRM1_IDLE__SHIFT 0x8
+#define SDMA_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
+#define SDMA_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
+#define SDMA_STATUS1_REG__CE_DRM_FULL__SHIFT 0xb
+#define SDMA_STATUS1_REG__CE_DRM1_FULL__SHIFT 0xc
+#define SDMA_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd
+#define SDMA_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe
+#define SDMA_STATUS1_REG__EX_START__SHIFT 0xf
+#define SDMA_STATUS1_REG__DRM_CTX_RESTORE__SHIFT 0x10
+#define SDMA_STATUS1_REG__CE_RD_STALL__SHIFT 0x11
+#define SDMA_STATUS1_REG__CE_WR_STALL__SHIFT 0x12
+#define SDMA_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
+#define SDMA_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
+#define SDMA_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
+#define SDMA_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
+#define SDMA_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
+#define SDMA_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
+#define SDMA_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
+#define SDMA_STATUS1_REG__CE_DRM_IDLE_MASK 0x00000080L
+#define SDMA_STATUS1_REG__CE_DRM1_IDLE_MASK 0x00000100L
+#define SDMA_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
+#define SDMA_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
+#define SDMA_STATUS1_REG__CE_DRM_FULL_MASK 0x00000800L
+#define SDMA_STATUS1_REG__CE_DRM1_FULL_MASK 0x00001000L
+#define SDMA_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L
+#define SDMA_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L
+#define SDMA_STATUS1_REG__EX_START_MASK 0x00008000L
+#define SDMA_STATUS1_REG__DRM_CTX_RESTORE_MASK 0x00010000L
+#define SDMA_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L
+#define SDMA_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L
+//SDMA_RD_BURST_CNTL
+#define SDMA_RD_BURST_CNTL__RD_BURST__SHIFT 0x0
+#define SDMA_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2
+#define SDMA_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L
+#define SDMA_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL
+//SDMA_HBM_PAGE_CONFIG
+#define SDMA_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
+#define SDMA_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000003L
+//SDMA_UCODE_CHECKSUM
+#define SDMA_UCODE_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA_FREEZE
+#define SDMA_FREEZE__PREEMPT__SHIFT 0x0
+#define SDMA_FREEZE__FREEZE__SHIFT 0x4
+#define SDMA_FREEZE__FROZEN__SHIFT 0x5
+#define SDMA_FREEZE__F32_FREEZE__SHIFT 0x6
+#define SDMA_FREEZE__PREEMPT_MASK 0x00000001L
+#define SDMA_FREEZE__FREEZE_MASK 0x00000010L
+#define SDMA_FREEZE__FROZEN_MASK 0x00000020L
+#define SDMA_FREEZE__F32_FREEZE_MASK 0x00000040L
+//SDMA_PHASE0_QUANTUM
+#define SDMA_PHASE0_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA_PHASE0_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA_PHASE0_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA_PHASE0_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA_PHASE1_QUANTUM
+#define SDMA_PHASE1_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA_PHASE1_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA_PHASE1_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA_PHASE1_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA_POWER_GATING
+#define SDMA_POWER_GATING__SDMA_POWER_OFF_CONDITION__SHIFT 0x0
+#define SDMA_POWER_GATING__SDMA_POWER_ON_CONDITION__SHIFT 0x1
+#define SDMA_POWER_GATING__SDMA_POWER_OFF_REQ__SHIFT 0x2
+#define SDMA_POWER_GATING__SDMA_POWER_ON_REQ__SHIFT 0x3
+#define SDMA_POWER_GATING__PG_CNTL_STATUS__SHIFT 0x4
+#define SDMA_POWER_GATING__SDMA_POWER_OFF_CONDITION_MASK 0x00000001L
+#define SDMA_POWER_GATING__SDMA_POWER_ON_CONDITION_MASK 0x00000002L
+#define SDMA_POWER_GATING__SDMA_POWER_OFF_REQ_MASK 0x00000004L
+#define SDMA_POWER_GATING__SDMA_POWER_ON_REQ_MASK 0x00000008L
+#define SDMA_POWER_GATING__PG_CNTL_STATUS_MASK 0x00000030L
+//SDMA_PGFSM_CONFIG
+#define SDMA_PGFSM_CONFIG__FSM_ADDR__SHIFT 0x0
+#define SDMA_PGFSM_CONFIG__POWER_DOWN__SHIFT 0x8
+#define SDMA_PGFSM_CONFIG__POWER_UP__SHIFT 0x9
+#define SDMA_PGFSM_CONFIG__P1_SELECT__SHIFT 0xa
+#define SDMA_PGFSM_CONFIG__P2_SELECT__SHIFT 0xb
+#define SDMA_PGFSM_CONFIG__WRITE__SHIFT 0xc
+#define SDMA_PGFSM_CONFIG__READ__SHIFT 0xd
+#define SDMA_PGFSM_CONFIG__SRBM_OVERRIDE__SHIFT 0x1b
+#define SDMA_PGFSM_CONFIG__REG_ADDR__SHIFT 0x1c
+#define SDMA_PGFSM_CONFIG__FSM_ADDR_MASK 0x000000FFL
+#define SDMA_PGFSM_CONFIG__POWER_DOWN_MASK 0x00000100L
+#define SDMA_PGFSM_CONFIG__POWER_UP_MASK 0x00000200L
+#define SDMA_PGFSM_CONFIG__P1_SELECT_MASK 0x00000400L
+#define SDMA_PGFSM_CONFIG__P2_SELECT_MASK 0x00000800L
+#define SDMA_PGFSM_CONFIG__WRITE_MASK 0x00001000L
+#define SDMA_PGFSM_CONFIG__READ_MASK 0x00002000L
+#define SDMA_PGFSM_CONFIG__SRBM_OVERRIDE_MASK 0x08000000L
+#define SDMA_PGFSM_CONFIG__REG_ADDR_MASK 0xF0000000L
+//SDMA_PGFSM_WRITE
+#define SDMA_PGFSM_WRITE__VALUE__SHIFT 0x0
+#define SDMA_PGFSM_WRITE__VALUE_MASK 0xFFFFFFFFL
+//SDMA_PGFSM_READ
+#define SDMA_PGFSM_READ__VALUE__SHIFT 0x0
+#define SDMA_PGFSM_READ__VALUE_MASK 0x00FFFFFFL
+//CC_SDMA_EDC_CONFIG
+#define CC_SDMA_EDC_CONFIG__WRITE_DIS__SHIFT 0x0
+#define CC_SDMA_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define CC_SDMA_EDC_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define CC_SDMA_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+//SDMA_BA_THRESHOLD
+#define SDMA_BA_THRESHOLD__READ_THRES__SHIFT 0x0
+#define SDMA_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
+#define SDMA_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
+#define SDMA_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
+//SDMA_ID
+#define SDMA_ID__DEVICE_ID__SHIFT 0x0
+#define SDMA_ID__DEVICE_ID_MASK 0x000000FFL
+//SDMA_VERSION
+#define SDMA_VERSION__MINVER__SHIFT 0x0
+#define SDMA_VERSION__MAJVER__SHIFT 0x8
+#define SDMA_VERSION__REV__SHIFT 0x10
+#define SDMA_VERSION__MINVER_MASK 0x0000007FL
+#define SDMA_VERSION__MAJVER_MASK 0x00007F00L
+#define SDMA_VERSION__REV_MASK 0x003F0000L
+//SDMA_EDC_COUNTER
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x0
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x2
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x4
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0x6
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0x8
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xa
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xc
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0x10
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x12
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x14
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x16
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x18
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x1a
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x1c
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x1e
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000003L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x0000000CL
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000030L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x000000C0L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000300L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00000C00L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00003000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x0000C000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00030000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x000C0000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00300000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00C00000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x03000000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x0C000000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x30000000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0xC0000000L
+//SDMA_EDC_COUNTER2
+#define SDMA_EDC_COUNTER2__SDMA_UCODE_BUF_SED__SHIFT 0x0
+#define SDMA_EDC_COUNTER2__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
+#define SDMA_EDC_COUNTER2__SDMA_IB_CMD_BUF_SED__SHIFT 0x4
+#define SDMA_EDC_COUNTER2__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x6
+#define SDMA_EDC_COUNTER2__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x8
+#define SDMA_EDC_COUNTER2__SDMA_UTCL1_WR_FIFO_SED__SHIFT 0xa
+#define SDMA_EDC_COUNTER2__SDMA_DATA_LUT_FIFO_SED__SHIFT 0xc
+#define SDMA_EDC_COUNTER2__SDMA_SPLIT_DATA_BUF_SED__SHIFT 0xe
+#define SDMA_EDC_COUNTER2__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x10
+#define SDMA_EDC_COUNTER2__SDMA_MC_RDRET_BUF_SED__SHIFT 0x12
+#define SDMA_EDC_COUNTER2__SDMA_UCODE_BUF_SED_MASK 0x00000003L
+#define SDMA_EDC_COUNTER2__SDMA_RB_CMD_BUF_SED_MASK 0x0000000CL
+#define SDMA_EDC_COUNTER2__SDMA_IB_CMD_BUF_SED_MASK 0x00000030L
+#define SDMA_EDC_COUNTER2__SDMA_UTCL1_RD_FIFO_SED_MASK 0x000000C0L
+#define SDMA_EDC_COUNTER2__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000300L
+#define SDMA_EDC_COUNTER2__SDMA_UTCL1_WR_FIFO_SED_MASK 0x00000C00L
+#define SDMA_EDC_COUNTER2__SDMA_DATA_LUT_FIFO_SED_MASK 0x00003000L
+#define SDMA_EDC_COUNTER2__SDMA_SPLIT_DATA_BUF_SED_MASK 0x0000C000L
+#define SDMA_EDC_COUNTER2__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x00030000L
+#define SDMA_EDC_COUNTER2__SDMA_MC_RDRET_BUF_SED_MASK 0x000C0000L
+//SDMA_STATUS2_REG
+#define SDMA_STATUS2_REG__ID__SHIFT 0x0
+#define SDMA_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3
+#define SDMA_STATUS2_REG__CMD_OP__SHIFT 0x10
+#define SDMA_STATUS2_REG__ID_MASK 0x00000007L
+#define SDMA_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L
+#define SDMA_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
+//SDMA_ATOMIC_CNTL
+#define SDMA_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
+#define SDMA_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
+#define SDMA_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
+#define SDMA_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
+//SDMA_ATOMIC_PREOP_LO
+#define SDMA_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
+#define SDMA_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA_ATOMIC_PREOP_HI
+#define SDMA_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
+#define SDMA_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA_UTCL1_CNTL
+#define SDMA_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0
+#define SDMA_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1
+#define SDMA_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb
+#define SDMA_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe
+#define SDMA_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
+#define SDMA_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d
+#define SDMA_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L
+#define SDMA_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL
+#define SDMA_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L
+#define SDMA_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L
+#define SDMA_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L
+#define SDMA_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L
+//SDMA_UTCL1_WATERMK
+#define SDMA_UTCL1_WATERMK__REQ_WATERMK__SHIFT 0x0
+#define SDMA_UTCL1_WATERMK__REQ_DEPTH__SHIFT 0x3
+#define SDMA_UTCL1_WATERMK__PAGE_WATERMK__SHIFT 0x5
+#define SDMA_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x8
+#define SDMA_UTCL1_WATERMK__RESERVED__SHIFT 0x10
+#define SDMA_UTCL1_WATERMK__REQ_WATERMK_MASK 0x00000007L
+#define SDMA_UTCL1_WATERMK__REQ_DEPTH_MASK 0x00000018L
+#define SDMA_UTCL1_WATERMK__PAGE_WATERMK_MASK 0x000000E0L
+#define SDMA_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x0000FF00L
+#define SDMA_UTCL1_WATERMK__RESERVED_MASK 0xFFFF0000L
+//SDMA_UTCL1_RD_STATUS
+#define SDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15
+#define SDMA_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16
+#define SDMA_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a
+#define SDMA_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d
+#define SDMA_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e
+#define SDMA_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f
+#define SDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L
+#define SDMA_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L
+#define SDMA_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L
+#define SDMA_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L
+#define SDMA_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L
+#define SDMA_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L
+//SDMA_UTCL1_WR_STATUS
+#define SDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA_UTCL1_WR_STATUS__REDO_ARR_EMPTY__SHIFT 0x7
+#define SDMA_UTCL1_WR_STATUS__RESERVED_8__SHIFT 0x8
+#define SDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA_UTCL1_WR_STATUS__REDO_ARR_FULL__SHIFT 0x10
+#define SDMA_UTCL1_WR_STATUS__RESERVED_17__SHIFT 0x11
+#define SDMA_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15
+#define SDMA_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16
+#define SDMA_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19
+#define SDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c
+#define SDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d
+#define SDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e
+#define SDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f
+#define SDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA_UTCL1_WR_STATUS__REDO_ARR_EMPTY_MASK 0x00000080L
+#define SDMA_UTCL1_WR_STATUS__RESERVED_8_MASK 0x00000100L
+#define SDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA_UTCL1_WR_STATUS__REDO_ARR_FULL_MASK 0x00010000L
+#define SDMA_UTCL1_WR_STATUS__RESERVED_17_MASK 0x00020000L
+#define SDMA_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L
+#define SDMA_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L
+#define SDMA_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L
+#define SDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L
+#define SDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L
+#define SDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L
+#define SDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L
+//SDMA_UTCL1_INV0
+#define SDMA_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0
+#define SDMA_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1
+#define SDMA_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2
+#define SDMA_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3
+#define SDMA_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4
+#define SDMA_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5
+#define SDMA_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6
+#define SDMA_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7
+#define SDMA_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8
+#define SDMA_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9
+#define SDMA_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa
+#define SDMA_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb
+#define SDMA_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc
+#define SDMA_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c
+#define SDMA_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L
+#define SDMA_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L
+#define SDMA_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L
+#define SDMA_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L
+#define SDMA_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L
+#define SDMA_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L
+#define SDMA_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L
+#define SDMA_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L
+#define SDMA_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L
+#define SDMA_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L
+#define SDMA_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L
+#define SDMA_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L
+#define SDMA_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L
+#define SDMA_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L
+//SDMA_UTCL1_INV1
+#define SDMA_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
+#define SDMA_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA_UTCL1_INV2
+#define SDMA_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0
+#define SDMA_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL
+//SDMA_UTCL1_RD_XNACK0
+#define SDMA_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA_UTCL1_RD_XNACK1
+#define SDMA_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA_UTCL1_WR_XNACK0
+#define SDMA_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA_UTCL1_WR_XNACK1
+#define SDMA_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA_UTCL1_TIMEOUT
+#define SDMA_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0
+#define SDMA_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10
+#define SDMA_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL
+#define SDMA_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L
+//SDMA_UTCL1_PAGE
+#define SDMA_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
+#define SDMA_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
+#define SDMA_UTCL1_PAGE__TMZ_ENABLE__SHIFT 0x5
+#define SDMA_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
+#define SDMA_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9
+#define SDMA_UTCL1_PAGE__LLC_NOALLOC__SHIFT 0xa
+#define SDMA_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
+#define SDMA_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
+#define SDMA_UTCL1_PAGE__TMZ_ENABLE_MASK 0x00000020L
+#define SDMA_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L
+#define SDMA_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L
+#define SDMA_UTCL1_PAGE__LLC_NOALLOC_MASK 0x00000400L
+//SDMA_POWER_CNTL_IDLE
+#define SDMA_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0
+#define SDMA_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10
+#define SDMA_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18
+#define SDMA_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL
+#define SDMA_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L
+#define SDMA_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L
+//SDMA_RELAX_ORDERING_LUT
+#define SDMA_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
+#define SDMA_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
+#define SDMA_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
+#define SDMA_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
+#define SDMA_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
+#define SDMA_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
+#define SDMA_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
+#define SDMA_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
+#define SDMA_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
+#define SDMA_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
+#define SDMA_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
+#define SDMA_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
+#define SDMA_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
+#define SDMA_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
+#define SDMA_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
+#define SDMA_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
+#define SDMA_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
+#define SDMA_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
+#define SDMA_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
+#define SDMA_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
+#define SDMA_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
+#define SDMA_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
+#define SDMA_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
+#define SDMA_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
+#define SDMA_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
+#define SDMA_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
+#define SDMA_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
+#define SDMA_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
+#define SDMA_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
+#define SDMA_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
+#define SDMA_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
+#define SDMA_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
+#define SDMA_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
+#define SDMA_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
+#define SDMA_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
+#define SDMA_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
+#define SDMA_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
+#define SDMA_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
+//SDMA_CHICKEN_BITS_2
+#define SDMA_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
+#define SDMA_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN__SHIFT 0x4
+#define SDMA_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
+#define SDMA_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN_MASK 0x00000010L
+//SDMA_STATUS3_REG
+#define SDMA_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
+#define SDMA_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
+#define SDMA_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
+#define SDMA_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15
+#define SDMA_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16
+#define SDMA_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
+#define SDMA_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
+#define SDMA_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
+#define SDMA_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L
+#define SDMA_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L
+//SDMA_PHYSICAL_ADDR_LO
+#define SDMA_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
+#define SDMA_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
+#define SDMA_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
+#define SDMA_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
+#define SDMA_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
+#define SDMA_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
+#define SDMA_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
+#define SDMA_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//SDMA_PHYSICAL_ADDR_HI
+#define SDMA_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//SDMA_PHASE2_QUANTUM
+#define SDMA_PHASE2_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA_PHASE2_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA_PHASE2_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA_PHASE2_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA_ERROR_LOG
+#define SDMA_ERROR_LOG__OVERRIDE__SHIFT 0x0
+#define SDMA_ERROR_LOG__STATUS__SHIFT 0x10
+#define SDMA_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
+#define SDMA_ERROR_LOG__STATUS_MASK 0xFFFF0000L
+//SDMA_PUB_DUMMY_REG0
+#define SDMA_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
+#define SDMA_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
+//SDMA_PUB_DUMMY_REG1
+#define SDMA_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
+#define SDMA_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
+//SDMA_PUB_DUMMY_REG2
+#define SDMA_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
+#define SDMA_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
+//SDMA_PUB_DUMMY_REG3
+#define SDMA_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
+#define SDMA_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
+//SDMA_F32_COUNTER
+#define SDMA_F32_COUNTER__VALUE__SHIFT 0x0
+#define SDMA_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
+//SDMA_PERFCNT_PERFCOUNTER0_CFG
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//SDMA_PERFCNT_PERFCOUNTER1_CFG
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//SDMA_PERFCNT_MISC_CNTL
+#define SDMA_PERFCNT_MISC_CNTL__CMD_OP__SHIFT 0x0
+#define SDMA_PERFCNT_MISC_CNTL__MMHUB_REQ_EVENT_SELECT__SHIFT 0x10
+#define SDMA_PERFCNT_MISC_CNTL__CMD_OP_MASK 0x0000FFFFL
+#define SDMA_PERFCNT_MISC_CNTL__MMHUB_REQ_EVENT_SELECT_MASK 0x00010000L
+//SDMA_PERFCNT_PERFCOUNTER_LO
+#define SDMA_PERFCNT_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define SDMA_PERFCNT_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//SDMA_PERFCNT_PERFCOUNTER_HI
+#define SDMA_PERFCNT_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define SDMA_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define SDMA_PERFCNT_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define SDMA_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//SDMA_CRD_CNTL
+#define SDMA_CRD_CNTL__DRM_CREDIT__SHIFT 0x0
+#define SDMA_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
+#define SDMA_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
+#define SDMA_CRD_CNTL__DRM_CREDIT_MASK 0x0000007FL
+#define SDMA_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
+#define SDMA_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
+//SDMA_GPU_IOV_VIOLATION_LOG
+#define SDMA_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SDMA_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SDMA_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SDMA_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14
+#define SDMA_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15
+#define SDMA_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16
+#define SDMA_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SDMA_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SDMA_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL
+#define SDMA_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L
+#define SDMA_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L
+#define SDMA_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L
+//SDMA_ULV_CNTL
+#define SDMA_ULV_CNTL__HYSTERESIS__SHIFT 0x0
+#define SDMA_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b
+#define SDMA_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c
+#define SDMA_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d
+#define SDMA_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e
+#define SDMA_ULV_CNTL__ULV_STATUS__SHIFT 0x1f
+#define SDMA_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL
+#define SDMA_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L
+#define SDMA_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L
+#define SDMA_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L
+#define SDMA_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L
+#define SDMA_ULV_CNTL__ULV_STATUS_MASK 0x80000000L
+//SDMA_EA_DBIT_ADDR_DATA
+#define SDMA_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
+#define SDMA_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA_EA_DBIT_ADDR_INDEX
+#define SDMA_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
+#define SDMA_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
+//SDMA_GPU_IOV_VIOLATION_LOG2
+#define SDMA_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define SDMA_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000003FFL
+//SDMA_STATUS4_REG
+#define SDMA_STATUS4_REG__IDLE__SHIFT 0x0
+#define SDMA_STATUS4_REG__IH_OUTSTANDING__SHIFT 0x2
+#define SDMA_STATUS4_REG__SEM_OUTSTANDING__SHIFT 0x3
+#define SDMA_STATUS4_REG__MMHUB_RD_OUTSTANDING__SHIFT 0x4
+#define SDMA_STATUS4_REG__MMHUB_WR_OUTSTANDING__SHIFT 0x5
+#define SDMA_STATUS4_REG__UTCL2_RD_OUTSTANDING__SHIFT 0x6
+#define SDMA_STATUS4_REG__UTCL2_WR_OUTSTANDING__SHIFT 0x7
+#define SDMA_STATUS4_REG__REG_POLLING__SHIFT 0x8
+#define SDMA_STATUS4_REG__MEM_POLLING__SHIFT 0x9
+#define SDMA_STATUS4_REG__UTCL2_RD_XNACK__SHIFT 0xa
+#define SDMA_STATUS4_REG__UTCL2_WR_XNACK__SHIFT 0xc
+#define SDMA_STATUS4_REG__ACTIVE_QUEUE_ID__SHIFT 0xe
+#define SDMA_STATUS4_REG__SRIOV_WATING_RLCV_CMD__SHIFT 0x12
+#define SDMA_STATUS4_REG__SRIOV_SDMA_EXECUTING_CMD__SHIFT 0x13
+#define SDMA_STATUS4_REG__VM_HOLE_STATUS__SHIFT 0x14
+#define SDMA_STATUS4_REG__IDLE_MASK 0x00000001L
+#define SDMA_STATUS4_REG__IH_OUTSTANDING_MASK 0x00000004L
+#define SDMA_STATUS4_REG__SEM_OUTSTANDING_MASK 0x00000008L
+#define SDMA_STATUS4_REG__MMHUB_RD_OUTSTANDING_MASK 0x00000010L
+#define SDMA_STATUS4_REG__MMHUB_WR_OUTSTANDING_MASK 0x00000020L
+#define SDMA_STATUS4_REG__UTCL2_RD_OUTSTANDING_MASK 0x00000040L
+#define SDMA_STATUS4_REG__UTCL2_WR_OUTSTANDING_MASK 0x00000080L
+#define SDMA_STATUS4_REG__REG_POLLING_MASK 0x00000100L
+#define SDMA_STATUS4_REG__MEM_POLLING_MASK 0x00000200L
+#define SDMA_STATUS4_REG__UTCL2_RD_XNACK_MASK 0x00000C00L
+#define SDMA_STATUS4_REG__UTCL2_WR_XNACK_MASK 0x00003000L
+#define SDMA_STATUS4_REG__ACTIVE_QUEUE_ID_MASK 0x0003C000L
+#define SDMA_STATUS4_REG__SRIOV_WATING_RLCV_CMD_MASK 0x00040000L
+#define SDMA_STATUS4_REG__SRIOV_SDMA_EXECUTING_CMD_MASK 0x00080000L
+#define SDMA_STATUS4_REG__VM_HOLE_STATUS_MASK 0x00100000L
+//SDMA_SCRATCH_RAM_DATA
+#define SDMA_SCRATCH_RAM_DATA__DATA__SHIFT 0x0
+#define SDMA_SCRATCH_RAM_DATA__DATA_MASK 0xFFFFFFFFL
+//SDMA_SCRATCH_RAM_ADDR
+#define SDMA_SCRATCH_RAM_ADDR__ADDR__SHIFT 0x0
+#define SDMA_SCRATCH_RAM_ADDR__ADDR_MASK 0x0000007FL
+//SDMA_CE_CTRL
+#define SDMA_CE_CTRL__RD_LUT_WATERMARK__SHIFT 0x0
+#define SDMA_CE_CTRL__RD_LUT_DEPTH__SHIFT 0x3
+#define SDMA_CE_CTRL__WR_AFIFO_WATERMARK__SHIFT 0x5
+#define SDMA_CE_CTRL__RESERVED__SHIFT 0x8
+#define SDMA_CE_CTRL__RD_LUT_WATERMARK_MASK 0x00000007L
+#define SDMA_CE_CTRL__RD_LUT_DEPTH_MASK 0x00000018L
+#define SDMA_CE_CTRL__WR_AFIFO_WATERMARK_MASK 0x000000E0L
+#define SDMA_CE_CTRL__RESERVED_MASK 0xFFFFFF00L
+//SDMA_RAS_STATUS
+#define SDMA_RAS_STATUS__RB_FETCH_ECC__SHIFT 0x0
+#define SDMA_RAS_STATUS__IB_FETCH_ECC__SHIFT 0x1
+#define SDMA_RAS_STATUS__F32_DATA_ECC__SHIFT 0x2
+#define SDMA_RAS_STATUS__WPTR_ATOMIC_ECC__SHIFT 0x3
+#define SDMA_RAS_STATUS__COPY_DATA_ECC__SHIFT 0x4
+#define SDMA_RAS_STATUS__SRAM_ECC__SHIFT 0x5
+#define SDMA_RAS_STATUS__RB_FETCH_NACK_GEN_ERR__SHIFT 0x8
+#define SDMA_RAS_STATUS__IB_FETCH_NACK_GEN_ERR__SHIFT 0x9
+#define SDMA_RAS_STATUS__F32_DATA_NACK_GEN_ERR__SHIFT 0xa
+#define SDMA_RAS_STATUS__COPY_DATA_NACK_GEN_ERR__SHIFT 0xb
+#define SDMA_RAS_STATUS__WRRET_DATA_NACK_GEN_ERR__SHIFT 0xc
+#define SDMA_RAS_STATUS__WPTR_RPTR_ATOMIC_NACK_GEN_ERR__SHIFT 0xd
+#define SDMA_RAS_STATUS__ECC_PWRMGT_INT_BUSY__SHIFT 0xe
+#define SDMA_RAS_STATUS__RB_FETCH_ECC_MASK 0x00000001L
+#define SDMA_RAS_STATUS__IB_FETCH_ECC_MASK 0x00000002L
+#define SDMA_RAS_STATUS__F32_DATA_ECC_MASK 0x00000004L
+#define SDMA_RAS_STATUS__WPTR_ATOMIC_ECC_MASK 0x00000008L
+#define SDMA_RAS_STATUS__COPY_DATA_ECC_MASK 0x00000010L
+#define SDMA_RAS_STATUS__SRAM_ECC_MASK 0x00000020L
+#define SDMA_RAS_STATUS__RB_FETCH_NACK_GEN_ERR_MASK 0x00000100L
+#define SDMA_RAS_STATUS__IB_FETCH_NACK_GEN_ERR_MASK 0x00000200L
+#define SDMA_RAS_STATUS__F32_DATA_NACK_GEN_ERR_MASK 0x00000400L
+#define SDMA_RAS_STATUS__COPY_DATA_NACK_GEN_ERR_MASK 0x00000800L
+#define SDMA_RAS_STATUS__WRRET_DATA_NACK_GEN_ERR_MASK 0x00001000L
+#define SDMA_RAS_STATUS__WPTR_RPTR_ATOMIC_NACK_GEN_ERR_MASK 0x00002000L
+#define SDMA_RAS_STATUS__ECC_PWRMGT_INT_BUSY_MASK 0x00004000L
+//SDMA_CLK_STATUS
+#define SDMA_CLK_STATUS__DYN_CLK__SHIFT 0x0
+#define SDMA_CLK_STATUS__PTR_CLK__SHIFT 0x1
+#define SDMA_CLK_STATUS__REG_CLK__SHIFT 0x2
+#define SDMA_CLK_STATUS__F32_CLK__SHIFT 0x3
+#define SDMA_CLK_STATUS__CE_CLK__SHIFT 0x4
+#define SDMA_CLK_STATUS__PERF_CLK__SHIFT 0x5
+#define SDMA_CLK_STATUS__DYN_CLK_MASK 0x00000001L
+#define SDMA_CLK_STATUS__PTR_CLK_MASK 0x00000002L
+#define SDMA_CLK_STATUS__REG_CLK_MASK 0x00000004L
+#define SDMA_CLK_STATUS__F32_CLK_MASK 0x00000008L
+#define SDMA_CLK_STATUS__CE_CLK_MASK 0x00000010L
+#define SDMA_CLK_STATUS__PERF_CLK_MASK 0x00000020L
+//SDMA_POWER_CNTL
+#define SDMA_POWER_CNTL__PG_CNTL_ENABLE__SHIFT 0x0
+#define SDMA_POWER_CNTL__EXT_PG_POWER_ON_REQ__SHIFT 0x1
+#define SDMA_POWER_CNTL__EXT_PG_POWER_OFF_REQ__SHIFT 0x2
+#define SDMA_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME__SHIFT 0x3
+#define SDMA_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8
+#define SDMA_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9
+#define SDMA_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa
+#define SDMA_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb
+#define SDMA_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc
+#define SDMA_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME__SHIFT 0x1a
+#define SDMA_POWER_CNTL__PG_CNTL_ENABLE_MASK 0x00000001L
+#define SDMA_POWER_CNTL__EXT_PG_POWER_ON_REQ_MASK 0x00000002L
+#define SDMA_POWER_CNTL__EXT_PG_POWER_OFF_REQ_MASK 0x00000004L
+#define SDMA_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
+#define SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
+#define SDMA_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L
+#define SDMA_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L
+#define SDMA_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L
+#define SDMA_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L
+#define SDMA_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
+//SDMA_CLK_CTRL
+#define SDMA_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SDMA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SDMA_CLK_CTRL__RESERVED__SHIFT 0xc
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define SDMA_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define SDMA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define SDMA_CLK_CTRL__RESERVED_MASK 0x00FFF000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//SDMA_CNTL
+#define SDMA_CNTL__TRAP_ENABLE__SHIFT 0x0
+#define SDMA_CNTL__UTC_L1_ENABLE__SHIFT 0x1
+#define SDMA_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
+#define SDMA_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
+#define SDMA_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
+#define SDMA_CNTL__MIDCMD_EXPIRE_ENABLE__SHIFT 0x6
+#define SDMA_CNTL__REG_WRITE_PROTECT_INT_ENABLE__SHIFT 0x7
+#define SDMA_CNTL__INVALID_DOORBELL_INT_ENABLE__SHIFT 0x8
+#define SDMA_CNTL__VM_HOLE_INT_ENABLE__SHIFT 0x9
+#define SDMA_CNTL__DRAM_ECC_INT_ENABLE__SHIFT 0xa
+#define SDMA_CNTL__PAGE_RETRY_TIMEOUT_INT_ENABLE__SHIFT 0xb
+#define SDMA_CNTL__PAGE_NULL_INT_ENABLE__SHIFT 0xc
+#define SDMA_CNTL__PAGE_FAULT_INT_ENABLE__SHIFT 0xd
+#define SDMA_CNTL__NACK_GEN_ERR_INT_ENABLE__SHIFT 0xe
+#define SDMA_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
+#define SDMA_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12
+#define SDMA_CNTL__DRM_RESTORE_ENABLE__SHIFT 0x13
+#define SDMA_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
+#define SDMA_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
+#define SDMA_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
+#define SDMA_CNTL__RB_PREEMPT_INT_ENABLE__SHIFT 0x1f
+#define SDMA_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define SDMA_CNTL__UTC_L1_ENABLE_MASK 0x00000002L
+#define SDMA_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define SDMA_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define SDMA_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
+#define SDMA_CNTL__MIDCMD_EXPIRE_ENABLE_MASK 0x00000040L
+#define SDMA_CNTL__REG_WRITE_PROTECT_INT_ENABLE_MASK 0x00000080L
+#define SDMA_CNTL__INVALID_DOORBELL_INT_ENABLE_MASK 0x00000100L
+#define SDMA_CNTL__VM_HOLE_INT_ENABLE_MASK 0x00000200L
+#define SDMA_CNTL__DRAM_ECC_INT_ENABLE_MASK 0x00000400L
+#define SDMA_CNTL__PAGE_RETRY_TIMEOUT_INT_ENABLE_MASK 0x00000800L
+#define SDMA_CNTL__PAGE_NULL_INT_ENABLE_MASK 0x00001000L
+#define SDMA_CNTL__PAGE_FAULT_INT_ENABLE_MASK 0x00002000L
+#define SDMA_CNTL__NACK_GEN_ERR_INT_ENABLE_MASK 0x00004000L
+#define SDMA_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
+#define SDMA_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L
+#define SDMA_CNTL__DRM_RESTORE_ENABLE_MASK 0x00080000L
+#define SDMA_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define SDMA_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
+#define SDMA_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
+#define SDMA_CNTL__RB_PREEMPT_INT_ENABLE_MASK 0x80000000L
+//SDMA_CHICKEN_BITS
+#define SDMA_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0
+#define SDMA_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
+#define SDMA_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
+#define SDMA_CHICKEN_BITS__F32_MGCG_ENABLE__SHIFT 0x3
+#define SDMA_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8
+#define SDMA_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa
+#define SDMA_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
+#define SDMA_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
+#define SDMA_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14
+#define SDMA_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17
+#define SDMA_CHICKEN_BITS__SRAM_FGCG_ENABLE__SHIFT 0x1a
+#define SDMA_CHICKEN_BITS__RESERVED__SHIFT 0x1b
+#define SDMA_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L
+#define SDMA_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
+#define SDMA_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
+#define SDMA_CHICKEN_BITS__F32_MGCG_ENABLE_MASK 0x00000008L
+#define SDMA_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L
+#define SDMA_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L
+#define SDMA_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
+#define SDMA_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
+#define SDMA_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L
+#define SDMA_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L
+#define SDMA_CHICKEN_BITS__SRAM_FGCG_ENABLE_MASK 0x04000000L
+#define SDMA_CHICKEN_BITS__RESERVED_MASK 0xF8000000L
+//SDMA_GB_ADDR_CONFIG
+#define SDMA_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define SDMA_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define SDMA_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define SDMA_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define SDMA_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA_GB_ADDR_CONFIG_READ
+#define SDMA_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define SDMA_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
+#define SDMA_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define SDMA_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
+#define SDMA_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA_GFX_RB_CNTL
+#define SDMA_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_GFX_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_GFX_RB_BASE
+#define SDMA_GFX_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_BASE_HI
+#define SDMA_GFX_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_GFX_RB_RPTR
+#define SDMA_GFX_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_RPTR_HI
+#define SDMA_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_WPTR
+#define SDMA_GFX_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_WPTR_HI
+#define SDMA_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_WPTR_POLL_CNTL
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_GFX_RB_RPTR_ADDR_HI
+#define SDMA_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_RPTR_ADDR_LO
+#define SDMA_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_GFX_IB_CNTL
+#define SDMA_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_GFX_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_GFX_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_GFX_IB_RPTR
+#define SDMA_GFX_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_GFX_IB_OFFSET
+#define SDMA_GFX_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_GFX_IB_BASE_LO
+#define SDMA_GFX_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_GFX_IB_BASE_HI
+#define SDMA_GFX_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_GFX_IB_SIZE
+#define SDMA_GFX_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_GFX_SKIP_CNTL
+#define SDMA_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_GFX_CONTEXT_STATUS
+#define SDMA_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_GFX_DOORBELL
+#define SDMA_GFX_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_GFX_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_GFX_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_GFX_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_GFX_CONTEXT_CNTL
+#define SDMA_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10
+#define SDMA_GFX_CONTEXT_CNTL__SESSION_SEL__SHIFT 0x18
+#define SDMA_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L
+#define SDMA_GFX_CONTEXT_CNTL__SESSION_SEL_MASK 0x0F000000L
+//SDMA_GFX_STATUS
+#define SDMA_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_GFX_DOORBELL_LOG
+#define SDMA_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_GFX_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_GFX_WATERMARK
+#define SDMA_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_GFX_DOORBELL_OFFSET
+#define SDMA_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_GFX_CSA_ADDR_LO
+#define SDMA_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_GFX_CSA_ADDR_HI
+#define SDMA_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_GFX_IB_SUB_REMAIN
+#define SDMA_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_GFX_PREEMPT
+#define SDMA_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_GFX_DUMMY_REG
+#define SDMA_GFX_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_WPTR_POLL_ADDR_HI
+#define SDMA_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_WPTR_POLL_ADDR_LO
+#define SDMA_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_GFX_RB_AQL_CNTL
+#define SDMA_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_GFX_MINOR_PTR_UPDATE
+#define SDMA_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_GFX_MIDCMD_DATA0
+#define SDMA_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA1
+#define SDMA_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA2
+#define SDMA_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA3
+#define SDMA_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA4
+#define SDMA_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA5
+#define SDMA_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA6
+#define SDMA_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA7
+#define SDMA_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA8
+#define SDMA_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA9
+#define SDMA_GFX_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA10
+#define SDMA_GFX_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_CNTL
+#define SDMA_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_PAGE_RB_CNTL
+#define SDMA_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_PAGE_RB_BASE
+#define SDMA_PAGE_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_BASE_HI
+#define SDMA_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_PAGE_RB_RPTR
+#define SDMA_PAGE_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_RPTR_HI
+#define SDMA_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_WPTR
+#define SDMA_PAGE_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_WPTR_HI
+#define SDMA_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_WPTR_POLL_CNTL
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_PAGE_RB_RPTR_ADDR_HI
+#define SDMA_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_RPTR_ADDR_LO
+#define SDMA_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_PAGE_IB_CNTL
+#define SDMA_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_PAGE_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_PAGE_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_PAGE_IB_RPTR
+#define SDMA_PAGE_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_PAGE_IB_OFFSET
+#define SDMA_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_PAGE_IB_BASE_LO
+#define SDMA_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_PAGE_IB_BASE_HI
+#define SDMA_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_PAGE_IB_SIZE
+#define SDMA_PAGE_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_PAGE_SKIP_CNTL
+#define SDMA_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_PAGE_CONTEXT_STATUS
+#define SDMA_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_PAGE_DOORBELL
+#define SDMA_PAGE_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_PAGE_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_PAGE_STATUS
+#define SDMA_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_PAGE_DOORBELL_LOG
+#define SDMA_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_PAGE_WATERMARK
+#define SDMA_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_PAGE_DOORBELL_OFFSET
+#define SDMA_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_PAGE_CSA_ADDR_LO
+#define SDMA_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_PAGE_CSA_ADDR_HI
+#define SDMA_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_PAGE_IB_SUB_REMAIN
+#define SDMA_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_PAGE_PREEMPT
+#define SDMA_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_PAGE_DUMMY_REG
+#define SDMA_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_WPTR_POLL_ADDR_HI
+#define SDMA_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_WPTR_POLL_ADDR_LO
+#define SDMA_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_PAGE_RB_AQL_CNTL
+#define SDMA_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_PAGE_MINOR_PTR_UPDATE
+#define SDMA_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_PAGE_MIDCMD_DATA0
+#define SDMA_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA1
+#define SDMA_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA2
+#define SDMA_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA3
+#define SDMA_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA4
+#define SDMA_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA5
+#define SDMA_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA6
+#define SDMA_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA7
+#define SDMA_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA8
+#define SDMA_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA9
+#define SDMA_PAGE_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA10
+#define SDMA_PAGE_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_CNTL
+#define SDMA_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC0_RB_CNTL
+#define SDMA_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC0_RB_BASE
+#define SDMA_RLC0_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_BASE_HI
+#define SDMA_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC0_RB_RPTR
+#define SDMA_RLC0_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_RPTR_HI
+#define SDMA_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_WPTR
+#define SDMA_RLC0_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_WPTR_HI
+#define SDMA_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_WPTR_POLL_CNTL
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC0_RB_RPTR_ADDR_HI
+#define SDMA_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_RPTR_ADDR_LO
+#define SDMA_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC0_IB_CNTL
+#define SDMA_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC0_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC0_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC0_IB_RPTR
+#define SDMA_RLC0_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC0_IB_OFFSET
+#define SDMA_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC0_IB_BASE_LO
+#define SDMA_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC0_IB_BASE_HI
+#define SDMA_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC0_IB_SIZE
+#define SDMA_RLC0_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC0_SKIP_CNTL
+#define SDMA_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC0_CONTEXT_STATUS
+#define SDMA_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC0_DOORBELL
+#define SDMA_RLC0_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC0_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC0_STATUS
+#define SDMA_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC0_DOORBELL_LOG
+#define SDMA_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC0_WATERMARK
+#define SDMA_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC0_DOORBELL_OFFSET
+#define SDMA_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC0_CSA_ADDR_LO
+#define SDMA_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC0_CSA_ADDR_HI
+#define SDMA_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC0_IB_SUB_REMAIN
+#define SDMA_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC0_PREEMPT
+#define SDMA_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC0_DUMMY_REG
+#define SDMA_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC0_RB_AQL_CNTL
+#define SDMA_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC0_MINOR_PTR_UPDATE
+#define SDMA_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC0_MIDCMD_DATA0
+#define SDMA_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA1
+#define SDMA_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA2
+#define SDMA_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA3
+#define SDMA_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA4
+#define SDMA_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA5
+#define SDMA_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA6
+#define SDMA_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA7
+#define SDMA_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA8
+#define SDMA_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA9
+#define SDMA_RLC0_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA10
+#define SDMA_RLC0_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_CNTL
+#define SDMA_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC1_RB_CNTL
+#define SDMA_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC1_RB_BASE
+#define SDMA_RLC1_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_BASE_HI
+#define SDMA_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC1_RB_RPTR
+#define SDMA_RLC1_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_RPTR_HI
+#define SDMA_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_WPTR
+#define SDMA_RLC1_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_WPTR_HI
+#define SDMA_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_WPTR_POLL_CNTL
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC1_RB_RPTR_ADDR_HI
+#define SDMA_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_RPTR_ADDR_LO
+#define SDMA_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC1_IB_CNTL
+#define SDMA_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC1_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC1_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC1_IB_RPTR
+#define SDMA_RLC1_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC1_IB_OFFSET
+#define SDMA_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC1_IB_BASE_LO
+#define SDMA_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC1_IB_BASE_HI
+#define SDMA_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC1_IB_SIZE
+#define SDMA_RLC1_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC1_SKIP_CNTL
+#define SDMA_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC1_CONTEXT_STATUS
+#define SDMA_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC1_DOORBELL
+#define SDMA_RLC1_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC1_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC1_STATUS
+#define SDMA_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC1_DOORBELL_LOG
+#define SDMA_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC1_WATERMARK
+#define SDMA_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC1_DOORBELL_OFFSET
+#define SDMA_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC1_CSA_ADDR_LO
+#define SDMA_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC1_CSA_ADDR_HI
+#define SDMA_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC1_IB_SUB_REMAIN
+#define SDMA_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC1_PREEMPT
+#define SDMA_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC1_DUMMY_REG
+#define SDMA_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC1_RB_AQL_CNTL
+#define SDMA_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC1_MINOR_PTR_UPDATE
+#define SDMA_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC1_MIDCMD_DATA0
+#define SDMA_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA1
+#define SDMA_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA2
+#define SDMA_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA3
+#define SDMA_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA4
+#define SDMA_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA5
+#define SDMA_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA6
+#define SDMA_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA7
+#define SDMA_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA8
+#define SDMA_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA9
+#define SDMA_RLC1_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA10
+#define SDMA_RLC1_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_CNTL
+#define SDMA_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC2_RB_CNTL
+#define SDMA_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC2_RB_BASE
+#define SDMA_RLC2_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_BASE_HI
+#define SDMA_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC2_RB_RPTR
+#define SDMA_RLC2_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_RPTR_HI
+#define SDMA_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_WPTR
+#define SDMA_RLC2_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_WPTR_HI
+#define SDMA_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_WPTR_POLL_CNTL
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC2_RB_RPTR_ADDR_HI
+#define SDMA_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_RPTR_ADDR_LO
+#define SDMA_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC2_IB_CNTL
+#define SDMA_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC2_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC2_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC2_IB_RPTR
+#define SDMA_RLC2_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC2_IB_OFFSET
+#define SDMA_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC2_IB_BASE_LO
+#define SDMA_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC2_IB_BASE_HI
+#define SDMA_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC2_IB_SIZE
+#define SDMA_RLC2_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC2_SKIP_CNTL
+#define SDMA_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC2_CONTEXT_STATUS
+#define SDMA_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC2_DOORBELL
+#define SDMA_RLC2_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC2_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC2_STATUS
+#define SDMA_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC2_DOORBELL_LOG
+#define SDMA_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC2_WATERMARK
+#define SDMA_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC2_DOORBELL_OFFSET
+#define SDMA_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC2_CSA_ADDR_LO
+#define SDMA_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC2_CSA_ADDR_HI
+#define SDMA_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC2_IB_SUB_REMAIN
+#define SDMA_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC2_PREEMPT
+#define SDMA_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC2_DUMMY_REG
+#define SDMA_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC2_RB_AQL_CNTL
+#define SDMA_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC2_MINOR_PTR_UPDATE
+#define SDMA_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC2_MIDCMD_DATA0
+#define SDMA_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA1
+#define SDMA_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA2
+#define SDMA_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA3
+#define SDMA_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA4
+#define SDMA_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA5
+#define SDMA_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA6
+#define SDMA_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA7
+#define SDMA_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA8
+#define SDMA_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA9
+#define SDMA_RLC2_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA10
+#define SDMA_RLC2_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_CNTL
+#define SDMA_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC3_RB_CNTL
+#define SDMA_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC3_RB_BASE
+#define SDMA_RLC3_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_BASE_HI
+#define SDMA_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC3_RB_RPTR
+#define SDMA_RLC3_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_RPTR_HI
+#define SDMA_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_WPTR
+#define SDMA_RLC3_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_WPTR_HI
+#define SDMA_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_WPTR_POLL_CNTL
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC3_RB_RPTR_ADDR_HI
+#define SDMA_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_RPTR_ADDR_LO
+#define SDMA_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC3_IB_CNTL
+#define SDMA_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC3_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC3_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC3_IB_RPTR
+#define SDMA_RLC3_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC3_IB_OFFSET
+#define SDMA_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC3_IB_BASE_LO
+#define SDMA_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC3_IB_BASE_HI
+#define SDMA_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC3_IB_SIZE
+#define SDMA_RLC3_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC3_SKIP_CNTL
+#define SDMA_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC3_CONTEXT_STATUS
+#define SDMA_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC3_DOORBELL
+#define SDMA_RLC3_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC3_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC3_STATUS
+#define SDMA_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC3_DOORBELL_LOG
+#define SDMA_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC3_WATERMARK
+#define SDMA_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC3_DOORBELL_OFFSET
+#define SDMA_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC3_CSA_ADDR_LO
+#define SDMA_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC3_CSA_ADDR_HI
+#define SDMA_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC3_IB_SUB_REMAIN
+#define SDMA_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC3_PREEMPT
+#define SDMA_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC3_DUMMY_REG
+#define SDMA_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC3_RB_AQL_CNTL
+#define SDMA_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC3_MINOR_PTR_UPDATE
+#define SDMA_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC3_MIDCMD_DATA0
+#define SDMA_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA1
+#define SDMA_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA2
+#define SDMA_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA3
+#define SDMA_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA4
+#define SDMA_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA5
+#define SDMA_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA6
+#define SDMA_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA7
+#define SDMA_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA8
+#define SDMA_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA9
+#define SDMA_RLC3_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA10
+#define SDMA_RLC3_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_CNTL
+#define SDMA_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC4_RB_CNTL
+#define SDMA_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC4_RB_BASE
+#define SDMA_RLC4_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_BASE_HI
+#define SDMA_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC4_RB_RPTR
+#define SDMA_RLC4_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_RPTR_HI
+#define SDMA_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_WPTR
+#define SDMA_RLC4_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_WPTR_HI
+#define SDMA_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_WPTR_POLL_CNTL
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC4_RB_RPTR_ADDR_HI
+#define SDMA_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_RPTR_ADDR_LO
+#define SDMA_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC4_IB_CNTL
+#define SDMA_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC4_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC4_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC4_IB_RPTR
+#define SDMA_RLC4_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC4_IB_OFFSET
+#define SDMA_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC4_IB_BASE_LO
+#define SDMA_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC4_IB_BASE_HI
+#define SDMA_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC4_IB_SIZE
+#define SDMA_RLC4_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC4_SKIP_CNTL
+#define SDMA_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC4_CONTEXT_STATUS
+#define SDMA_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC4_DOORBELL
+#define SDMA_RLC4_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC4_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC4_STATUS
+#define SDMA_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC4_DOORBELL_LOG
+#define SDMA_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC4_WATERMARK
+#define SDMA_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC4_DOORBELL_OFFSET
+#define SDMA_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC4_CSA_ADDR_LO
+#define SDMA_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC4_CSA_ADDR_HI
+#define SDMA_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC4_IB_SUB_REMAIN
+#define SDMA_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC4_PREEMPT
+#define SDMA_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC4_DUMMY_REG
+#define SDMA_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC4_RB_AQL_CNTL
+#define SDMA_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC4_MINOR_PTR_UPDATE
+#define SDMA_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC4_MIDCMD_DATA0
+#define SDMA_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA1
+#define SDMA_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA2
+#define SDMA_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA3
+#define SDMA_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA4
+#define SDMA_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA5
+#define SDMA_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA6
+#define SDMA_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA7
+#define SDMA_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA8
+#define SDMA_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA9
+#define SDMA_RLC4_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA10
+#define SDMA_RLC4_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_CNTL
+#define SDMA_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC5_RB_CNTL
+#define SDMA_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC5_RB_BASE
+#define SDMA_RLC5_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_BASE_HI
+#define SDMA_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC5_RB_RPTR
+#define SDMA_RLC5_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_RPTR_HI
+#define SDMA_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_WPTR
+#define SDMA_RLC5_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_WPTR_HI
+#define SDMA_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_WPTR_POLL_CNTL
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC5_RB_RPTR_ADDR_HI
+#define SDMA_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_RPTR_ADDR_LO
+#define SDMA_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC5_IB_CNTL
+#define SDMA_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC5_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC5_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC5_IB_RPTR
+#define SDMA_RLC5_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC5_IB_OFFSET
+#define SDMA_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC5_IB_BASE_LO
+#define SDMA_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC5_IB_BASE_HI
+#define SDMA_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC5_IB_SIZE
+#define SDMA_RLC5_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC5_SKIP_CNTL
+#define SDMA_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC5_CONTEXT_STATUS
+#define SDMA_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC5_DOORBELL
+#define SDMA_RLC5_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC5_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC5_STATUS
+#define SDMA_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC5_DOORBELL_LOG
+#define SDMA_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC5_WATERMARK
+#define SDMA_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC5_DOORBELL_OFFSET
+#define SDMA_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC5_CSA_ADDR_LO
+#define SDMA_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC5_CSA_ADDR_HI
+#define SDMA_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC5_IB_SUB_REMAIN
+#define SDMA_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC5_PREEMPT
+#define SDMA_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC5_DUMMY_REG
+#define SDMA_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC5_RB_AQL_CNTL
+#define SDMA_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC5_MINOR_PTR_UPDATE
+#define SDMA_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC5_MIDCMD_DATA0
+#define SDMA_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA1
+#define SDMA_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA2
+#define SDMA_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA3
+#define SDMA_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA4
+#define SDMA_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA5
+#define SDMA_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA6
+#define SDMA_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA7
+#define SDMA_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA8
+#define SDMA_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA9
+#define SDMA_RLC5_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA10
+#define SDMA_RLC5_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_CNTL
+#define SDMA_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC6_RB_CNTL
+#define SDMA_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC6_RB_BASE
+#define SDMA_RLC6_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_BASE_HI
+#define SDMA_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC6_RB_RPTR
+#define SDMA_RLC6_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_RPTR_HI
+#define SDMA_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_WPTR
+#define SDMA_RLC6_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_WPTR_HI
+#define SDMA_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_WPTR_POLL_CNTL
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC6_RB_RPTR_ADDR_HI
+#define SDMA_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_RPTR_ADDR_LO
+#define SDMA_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC6_IB_CNTL
+#define SDMA_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC6_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC6_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC6_IB_RPTR
+#define SDMA_RLC6_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC6_IB_OFFSET
+#define SDMA_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC6_IB_BASE_LO
+#define SDMA_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC6_IB_BASE_HI
+#define SDMA_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC6_IB_SIZE
+#define SDMA_RLC6_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC6_SKIP_CNTL
+#define SDMA_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC6_CONTEXT_STATUS
+#define SDMA_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC6_DOORBELL
+#define SDMA_RLC6_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC6_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC6_STATUS
+#define SDMA_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC6_DOORBELL_LOG
+#define SDMA_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC6_WATERMARK
+#define SDMA_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC6_DOORBELL_OFFSET
+#define SDMA_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC6_CSA_ADDR_LO
+#define SDMA_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC6_CSA_ADDR_HI
+#define SDMA_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC6_IB_SUB_REMAIN
+#define SDMA_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC6_PREEMPT
+#define SDMA_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC6_DUMMY_REG
+#define SDMA_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC6_RB_AQL_CNTL
+#define SDMA_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC6_MINOR_PTR_UPDATE
+#define SDMA_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC6_MIDCMD_DATA0
+#define SDMA_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA1
+#define SDMA_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA2
+#define SDMA_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA3
+#define SDMA_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA4
+#define SDMA_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA5
+#define SDMA_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA6
+#define SDMA_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA7
+#define SDMA_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA8
+#define SDMA_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA9
+#define SDMA_RLC6_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA10
+#define SDMA_RLC6_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_CNTL
+#define SDMA_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC7_RB_CNTL
+#define SDMA_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC7_RB_BASE
+#define SDMA_RLC7_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_BASE_HI
+#define SDMA_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC7_RB_RPTR
+#define SDMA_RLC7_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_RPTR_HI
+#define SDMA_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_WPTR
+#define SDMA_RLC7_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_WPTR_HI
+#define SDMA_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_WPTR_POLL_CNTL
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC7_RB_RPTR_ADDR_HI
+#define SDMA_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_RPTR_ADDR_LO
+#define SDMA_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC7_IB_CNTL
+#define SDMA_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC7_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC7_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC7_IB_RPTR
+#define SDMA_RLC7_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC7_IB_OFFSET
+#define SDMA_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC7_IB_BASE_LO
+#define SDMA_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC7_IB_BASE_HI
+#define SDMA_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC7_IB_SIZE
+#define SDMA_RLC7_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC7_SKIP_CNTL
+#define SDMA_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC7_CONTEXT_STATUS
+#define SDMA_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC7_DOORBELL
+#define SDMA_RLC7_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC7_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC7_STATUS
+#define SDMA_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC7_DOORBELL_LOG
+#define SDMA_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC7_WATERMARK
+#define SDMA_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC7_DOORBELL_OFFSET
+#define SDMA_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC7_CSA_ADDR_LO
+#define SDMA_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC7_CSA_ADDR_HI
+#define SDMA_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC7_IB_SUB_REMAIN
+#define SDMA_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC7_PREEMPT
+#define SDMA_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC7_DUMMY_REG
+#define SDMA_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC7_RB_AQL_CNTL
+#define SDMA_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC7_MINOR_PTR_UPDATE
+#define SDMA_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC7_MIDCMD_DATA0
+#define SDMA_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA1
+#define SDMA_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA2
+#define SDMA_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA3
+#define SDMA_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA4
+#define SDMA_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA5
+#define SDMA_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA6
+#define SDMA_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA7
+#define SDMA_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA8
+#define SDMA_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA9
+#define SDMA_RLC7_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA10
+#define SDMA_RLC7_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_CNTL
+#define SDMA_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 75f18791cdb9..86b6b0c9fb02 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -160,6 +160,8 @@ enum PP_SMC_POWER_PROFILE {
PP_SMC_POWER_PROFILE_COMPUTE = 0x5,
PP_SMC_POWER_PROFILE_CUSTOM = 0x6,
PP_SMC_POWER_PROFILE_WINDOW3D = 0x7,
+ PP_SMC_POWER_PROFILE_CAPPED = 0x8,
+ PP_SMC_POWER_PROFILE_UNCAPPED = 0x9,
PP_SMC_POWER_PROFILE_COUNT,
};
@@ -331,6 +333,8 @@ struct amd_pm_funcs {
int (*get_mclk_od)(void *handle);
int (*set_mclk_od)(void *handle, uint32_t value);
int (*read_sensor)(void *handle, int idx, void *value, int *size);
+ int (*get_apu_thermal_limit)(void *handle, uint32_t *limit);
+ int (*set_apu_thermal_limit)(void *handle, uint32_t limit);
enum amd_dpm_forced_level (*get_performance_level)(void *handle);
enum amd_pm_state_type (*get_current_power_state)(void *handle);
int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm);
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 6e79d3352d0b..300e156b924f 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -456,6 +456,34 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
return ret;
}
+int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = -EINVAL;
+
+ if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return ret;
+}
+
+int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = -EINVAL;
+
+ if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return ret;
+}
+
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index bf6d63673b5a..d75a67cfe523 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -91,6 +91,8 @@ const char * const amdgpu_pp_profile_name[] = {
"COMPUTE",
"CUSTOM",
"WINDOW_3D",
+ "CAPPED",
+ "UNCAPPED",
};
/**
@@ -1686,6 +1688,82 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
}
/**
+ * DOC: apu_thermal_cap
+ *
+ * The amdgpu driver provides a sysfs API for retrieving/updating thermal
+ * limit temperature in millidegrees Celsius
+ *
+ * Reading back the file shows you the current core limit value.
+ *
+ * Writing an integer to the file sets a new thermal limit. The value
+ * should be between 0 and 100. If the value is less than 0 or greater
+ * than 100, the write request will be ignored.
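+ *
+ * As an illustrative example (the sysfs path may differ per system; card0 is
+ * assumed here), the limit could be read and updated from a shell with:
+ *
+ *   cat /sys/class/drm/card0/device/apu_thermal_cap
+ *   echo 80 > /sys/class/drm/card0/device/apu_thermal_cap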
+ */
+static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret, size;
+ u32 limit;
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
+ if (!ret)
+ size = sysfs_emit(buf, "%u\n", limit);
+ else
+ size = sysfs_emit(buf, "failed to get thermal limit\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
+}
+
+static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int ret;
+ u32 value;
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ ret = kstrtou32(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ if (value > 100) {
+ dev_err(dev, "Invalid argument !\n");
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+	ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
+	if (ret) {
+		dev_err(dev, "failed to update thermal limit\n");
+		pm_runtime_mark_last_busy(ddev->dev);
+		pm_runtime_put_autosuspend(ddev->dev);
+		return ret;
+	}
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return count;
+}
+
+/**
* DOC: gpu_metrics
*
* The amdgpu driver provides a sysfs API for retrieving current gpu
@@ -1937,6 +2015,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(apu_thermal_cap, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC,
.attr_update = ss_power_attr_update),
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 16addceca68f..d178f3f44081 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -369,6 +369,9 @@ struct amdgpu_pm {
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
void *data, uint32_t *size);
+int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit);
+int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit);
+
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
uint32_t block_type, bool gate);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 0652b001ad54..b5d64749990e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -40,6 +40,7 @@
#include "smu_v13_0_0_ppt.h"
#include "smu_v13_0_4_ppt.h"
#include "smu_v13_0_5_ppt.h"
+#include "smu_v13_0_6_ppt.h"
#include "smu_v13_0_7_ppt.h"
#include "amd_pcie.h"
@@ -609,6 +610,11 @@ static int smu_set_funcs(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 10):
smu_v13_0_0_set_ppt_funcs(smu);
break;
+ case IP_VERSION(13, 0, 6):
+ smu_v13_0_6_set_ppt_funcs(smu);
+ /* Enable pp_od_clk_voltage node */
+ smu->od_enabled = true;
+ break;
case IP_VERSION(13, 0, 7):
smu_v13_0_7_set_ppt_funcs(smu);
break;
@@ -2532,6 +2538,28 @@ unlock:
return ret;
}
+static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
+{
+ int ret = -EINVAL;
+ struct smu_context *smu = handle;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
+ ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit);
+
+ return ret;
+}
+
+static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)
+{
+ int ret = -EINVAL;
+ struct smu_context *smu = handle;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
+ ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit);
+
+ return ret;
+}
+
static int smu_get_power_profile_mode(void *handle, char *buf)
{
struct smu_context *smu = handle;
@@ -3033,6 +3061,8 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.emit_clock_levels = smu_emit_ppclk_levels,
.force_performance_level = smu_force_performance_level,
.read_sensor = smu_read_sensor,
+ .get_apu_thermal_limit = smu_get_apu_thermal_limit,
+ .set_apu_thermal_limit = smu_set_apu_thermal_limit,
.get_performance_level = smu_get_performance_level,
.get_current_power_state = smu_get_current_power_state,
.get_fan_speed_rpm = smu_get_fan_speed_rpm,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 2a03d85bf4e2..09469c750a96 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -722,6 +722,18 @@ struct pptable_funcs {
void *data, uint32_t *size);
/**
+ * @get_apu_thermal_limit: get apu core limit from smu
+ * &limit: current limit temperature in millidegrees Celsius
+ */
+ int (*get_apu_thermal_limit)(struct smu_context *smu, uint32_t *limit);
+
+ /**
+ * @set_apu_thermal_limit: update all controllers with new limit
+ * &limit: limit temperature to be set, in millidegrees Celsius
+ */
+ int (*set_apu_thermal_limit)(struct smu_context *smu, uint32_t limit);
+
+ /**
* @pre_display_config_changed: Prepare GPU for a display configuration
* change.
*
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h
index 8361ebd8d876..21e6028a49e6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h
@@ -238,7 +238,9 @@ typedef struct {
#define WORKLOAD_PPLIB_VR_BIT 3
#define WORKLOAD_PPLIB_COMPUTE_BIT 4
#define WORKLOAD_PPLIB_CUSTOM_BIT 5
-#define WORKLOAD_PPLIB_COUNT 6
+#define WORKLOAD_PPLIB_CAPPED_BIT 6
+#define WORKLOAD_PPLIB_UNCAPPED_BIT 7
+#define WORKLOAD_PPLIB_COUNT 8
#define TABLE_BIOS_IF 0 // Called by BIOS
#define TABLE_WATERMARKS 1 // Called by DAL through VBIOS
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
index f77401709d83..2162ecd1057d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 7
+#define PMFW_DRIVER_IF_VERSION 8
typedef struct {
int32_t value;
@@ -198,7 +198,7 @@ typedef struct {
uint16_t SkinTemp;
uint16_t DeviceState;
uint16_t CurTemp; //[centi-Celsius]
- uint16_t spare2;
+ uint16_t FilterAlphaValue;
uint16_t AverageGfxclkFrequency;
uint16_t AverageFclkFrequency;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h
new file mode 100644
index 000000000000..be596777cd2c
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU_13_0_6_DRIVER_IF_H
+#define SMU_13_0_6_DRIVER_IF_H
+
+// *** IMPORTANT ***
+// PMFW TEAM: Always increment the interface version if
+// anything is changed in this file
+#define SMU13_0_6_DRIVER_IF_VERSION 0x08042022
+
+//I2C Interface
+#define NUM_I2C_CONTROLLERS 8
+#define I2C_CONTROLLER_ENABLED 1
+#define I2C_CONTROLLER_DISABLED 0
+
+#define MAX_SW_I2C_COMMANDS 24
+
+typedef enum {
+ I2C_CONTROLLER_PORT_0, //CKSVII2C0
+ I2C_CONTROLLER_PORT_1, //CKSVII2C1
+ I2C_CONTROLLER_PORT_COUNT,
+} I2cControllerPort_e;
+
+typedef enum {
+ UNSUPPORTED_1, //50 Kbits/s not supported anymore!
+ I2C_SPEED_STANDARD_100K, //100 Kbits/s
+ I2C_SPEED_FAST_400K, //400 Kbits/s
+ I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode)
+ UNSUPPORTED_2, //1 Mbits/s (in high speed mode) not supported anymore!
+ UNSUPPORTED_3, //2.3 Mbits/s not supported anymore!
+ I2C_SPEED_COUNT,
+} I2cSpeed_e;
+
+typedef enum {
+ I2C_CMD_READ,
+ I2C_CMD_WRITE,
+ I2C_CMD_COUNT,
+} I2cCmdType_e;
+
+#define CMDCONFIG_STOP_BIT 0
+#define CMDCONFIG_RESTART_BIT 1
+#define CMDCONFIG_READWRITE_BIT 2 //bit should be 0 for read, 1 for write
+
+#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT)
+#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT)
+#define CMDCONFIG_READWRITE_MASK (1 << CMDCONFIG_READWRITE_BIT)
+
+typedef struct {
+ uint8_t ReadWriteData; //Return data for read. Data to send for write
+ uint8_t CmdConfig; //Indicates whether the associated command should end with a stop or restart, and whether it is a read or a write
+} SwI2cCmd_t; //SW I2C Command Table
+
+typedef struct {
+ uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1)
+ uint8_t I2CSpeed; //Use I2cSpeed_e to indicate speed to select
+ uint8_t SlaveAddress; //Slave address of device
+ uint8_t NumCmds; //Number of commands
+ SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS];
+} SwI2cRequest_t; // SW I2C Request Table
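+
+/*
+ * Illustrative sketch (editor's note, not part of the firmware interface):
+ * using the definitions above, a single-byte read request could be filled in
+ * roughly as follows; exact addressing conventions are firmware specific.
+ *
+ *   SwI2cRequest_t req = { 0 };
+ *
+ *   req.I2CcontrollerPort = I2C_CONTROLLER_PORT_0;
+ *   req.I2CSpeed = I2C_SPEED_FAST_400K;
+ *   req.SlaveAddress = 0xA0;                         // example device address
+ *   req.NumCmds = 1;
+ *   req.SwI2cCmds[0].CmdConfig = CMDCONFIG_STOP_MASK; // READWRITE bit clear = read, stop after
+ *   // on completion, req.SwI2cCmds[0].ReadWriteData holds the byte read back
+ */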
+
+typedef struct {
+ SwI2cRequest_t SwI2cRequest;
+ uint32_t Spare[8];
+ uint32_t MmHubPadding[8]; // SMU internal use
+} SwI2cRequestExternal_t;
+
+typedef enum {
+ PPCLK_VCLK,
+ PPCLK_DCLK,
+ PPCLK_SOCCLK,
+ PPCLK_UCLK,
+ PPCLK_FCLK,
+ PPCLK_LCLK,
+ PPCLK_COUNT,
+} PPCLK_e;
+
+typedef enum {
+ GPIO_INT_POLARITY_ACTIVE_LOW,
+ GPIO_INT_POLARITY_ACTIVE_HIGH,
+} GpioIntPolarity_e;
+
+//TODO confirm if this is used in SMU_13_0_6 PPSMC_MSG_SetUclkDpmMode
+typedef enum {
+ UCLK_DPM_MODE_BANDWIDTH,
+ UCLK_DPM_MODE_LATENCY,
+} UCLK_DPM_MODE_e;
+
+typedef struct {
+ //0-26 SOC, 27-29 SOCIO
+ uint16_t avgPsmCount[30];
+ uint16_t minPsmCount[30];
+ float avgPsmVoltage[30];
+ float minPsmVoltage[30];
+} AvfsDebugTableAid_t;
+
+typedef struct {
+ //0-27 GFX, 28-29 SOC
+ uint16_t avgPsmCount[30];
+ uint16_t minPsmCount[30];
+ float avgPsmVoltage[30];
+ float minPsmVoltage[30];
+} AvfsDebugTableXcd_t;
+
+// These defines are used with the following messages:
+// SMC_MSG_TransferTableDram2Smu
+// SMC_MSG_TransferTableSmu2Dram
+// #define TABLE_PPTABLE 0
+// #define TABLE_AVFS_PSM_DEBUG 1
+// #define TABLE_AVFS_FUSE_OVERRIDE 2
+// #define TABLE_PMSTATUSLOG 3
+// #define TABLE_SMU_METRICS 4
+// #define TABLE_DRIVER_SMU_CONFIG 5
+// #define TABLE_I2C_COMMANDS 6
+// #define TABLE_COUNT 7
+
+// // Table transfer status
+// #define TABLE_TRANSFER_OK 0x0
+// #define TABLE_TRANSFER_FAILED 0xFF
+// #define TABLE_TRANSFER_PENDING 0xAB
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
new file mode 100644
index 000000000000..bdccbb4a6276
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU_13_0_6_PMFW_H
+#define SMU_13_0_6_PMFW_H
+
+#define NUM_VCLK_DPM_LEVELS 4
+#define NUM_DCLK_DPM_LEVELS 4
+#define NUM_SOCCLK_DPM_LEVELS 4
+#define NUM_LCLK_DPM_LEVELS 4
+#define NUM_UCLK_DPM_LEVELS 4
+#define NUM_FCLK_DPM_LEVELS 4
+#define NUM_XGMI_DPM_LEVELS 2
+#define NUM_CXL_BITRATES 4
+#define NUM_PCIE_BITRATES 4
+#define NUM_XGMI_BITRATES 4
+#define NUM_XGMI_WIDTHS 3
+
+typedef enum {
+/*0*/ FEATURE_DATA_CALCULATION = 0,
+/*1*/ FEATURE_DPM_CCLK = 1,
+/*2*/ FEATURE_DPM_FCLK = 2,
+/*3*/ FEATURE_DPM_GFXCLK = 3,
+/*4*/ FEATURE_DPM_LCLK = 4,
+/*5*/ FEATURE_DPM_SOCCLK = 5,
+/*6*/ FEATURE_DPM_UCLK = 6,
+/*7*/ FEATURE_DPM_VCN = 7,
+/*8*/ FEATURE_DPM_XGMI = 8,
+/*9*/ FEATURE_DS_FCLK = 9,
+/*10*/ FEATURE_DS_GFXCLK = 10,
+/*11*/ FEATURE_DS_LCLK = 11,
+/*12*/ FEATURE_DS_MP0CLK = 12,
+/*13*/ FEATURE_DS_MP1CLK = 13,
+/*14*/ FEATURE_DS_MPIOCLK = 14,
+/*15*/ FEATURE_DS_SOCCLK = 15,
+/*16*/ FEATURE_DS_VCN = 16,
+/*17*/ FEATURE_APCC_DFLL = 17,
+/*18*/ FEATURE_APCC_PLUS = 18,
+/*19*/ FEATURE_DF_CSTATE = 19,
+/*20*/ FEATURE_CC6 = 20,
+/*21*/ FEATURE_PC6 = 21,
+/*22*/ FEATURE_CPPC = 22,
+/*23*/ FEATURE_PPT = 23,
+/*24*/ FEATURE_TDC = 24,
+/*25*/ FEATURE_THERMAL = 25,
+/*26*/ FEATURE_SOC_PCC = 26,
+/*27*/ FEATURE_CCD_PCC = 27,
+/*28*/ FEATURE_CCD_EDC = 28,
+/*29*/ FEATURE_PROCHOT = 29,
+/*30*/ FEATURE_DVO_CCLK = 30,
+/*31*/ FEATURE_FDD_AID_HBM = 31,
+/*32*/ FEATURE_FDD_AID_SOC = 32,
+/*33*/ FEATURE_FDD_XCD_EDC = 33,
+/*34*/ FEATURE_FDD_XCD_XVMIN = 34,
+/*35*/ FEATURE_FW_CTF = 35,
+/*36*/ FEATURE_GFXOFF = 36,
+/*37*/ FEATURE_SMU_CG = 37,
+/*38*/ FEATURE_PSI7 = 38,
+/*39*/ FEATURE_CSTATE_BOOST = 39,
+/*40*/ FEATURE_XGMI_PER_LINK_PWR_DOWN = 40,
+/*41*/ FEATURE_CXL_QOS = 41,
+/*42*/ FEATURE_SOC_DC_RTC = 42,
+/*43*/ FEATURE_GFX_DC_RTC = 43,
+
+/*44*/ NUM_FEATURES = 44
+} FEATURE_LIST_e;
+
+//enum for MPIO PCIe gen speed msgs
+typedef enum {
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN1,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN2,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN3,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN4,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN4_ESM,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN5,
+ PCIE_LINK_SPEED_INDEX_TABLE_COUNT
+} PCIE_LINK_SPEED_INDEX_TABLE_e;
+
+typedef enum {
+ VOLTAGE_COLD_0,
+ VOLTAGE_COLD_1,
+ VOLTAGE_COLD_2,
+ VOLTAGE_COLD_3,
+ VOLTAGE_COLD_4,
+ VOLTAGE_COLD_5,
+ VOLTAGE_COLD_6,
+ VOLTAGE_COLD_7,
+ VOLTAGE_MID_0,
+ VOLTAGE_MID_1,
+ VOLTAGE_MID_2,
+ VOLTAGE_MID_3,
+ VOLTAGE_MID_4,
+ VOLTAGE_MID_5,
+ VOLTAGE_MID_6,
+ VOLTAGE_MID_7,
+ VOLTAGE_HOT_0,
+ VOLTAGE_HOT_1,
+ VOLTAGE_HOT_2,
+ VOLTAGE_HOT_3,
+ VOLTAGE_HOT_4,
+ VOLTAGE_HOT_5,
+ VOLTAGE_HOT_6,
+ VOLTAGE_HOT_7,
+ VOLTAGE_GUARDBAND_COUNT
+} GFX_GUARDBAND_e;
+
+#define SMU_METRICS_TABLE_VERSION 0x1
+
+typedef struct {
+ uint32_t AccumulationCounter;
+
+ //TEMPERATURE
+ uint32_t MaxSocketTemperature;
+ uint32_t MaxVrTemperature;
+ uint32_t MaxHbmTemperature;
+ uint64_t MaxSocketTemperatureAcc;
+ uint64_t MaxVrTemperatureAcc;
+ uint64_t MaxHbmTemperatureAcc;
+
+ //POWER
+ uint32_t SocketPowerLimit;
+ uint32_t MaxSocketPowerLimit;
+ uint32_t SocketPower;
+
+ //ENERGY
+ uint64_t Timestamp;
+ uint64_t SocketEnergyAcc;
+ uint64_t CcdEnergyAcc;
+ uint64_t XcdEnergyAcc;
+ uint64_t AidEnergyAcc;
+ uint64_t HbmEnergyAcc;
+
+ //FREQUENCY
+ uint32_t CclkFrequencyLimit;
+ uint32_t GfxclkFrequencyLimit;
+ uint32_t FclkFrequency;
+ uint32_t UclkFrequency;
+ uint32_t SocclkFrequency[4];
+ uint32_t VclkFrequency[4];
+ uint32_t DclkFrequency[4];
+ uint32_t LclkFrequency[4];
+ uint64_t GfxclkFrequencyAcc[8];
+ uint64_t CclkFrequencyAcc[96];
+
+ //FREQUENCY RANGE
+ uint32_t MaxCclkFrequency;
+ uint32_t MinCclkFrequency;
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t FclkFrequencyTable[4];
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequencyTable[4];
+ uint32_t VclkFrequencyTable[4];
+ uint32_t DclkFrequencyTable[4];
+ uint32_t LclkFrequencyTable[4];
+ uint32_t MaxLclkDpmRange;
+ uint32_t MinLclkDpmRange;
+
+ //XGMI
+ uint32_t XgmiWidth;
+ uint32_t XgmiBitrate;
+ uint64_t XgmiReadBandwidthAcc[8];
+ uint64_t XgmiWriteBandwidthAcc[8];
+
+ //ACTIVITY
+ uint32_t SocketC0Residency;
+ uint32_t SocketGfxBusy;
+ uint32_t DramBandwidthUtilization;
+ uint64_t SocketC0ResidencyAcc;
+ uint64_t SocketGfxBusyAcc;
+ uint64_t DramBandwidthAcc;
+ uint32_t MaxDramBandwidth;
+ uint64_t DramBandwidthUtilizationAcc;
+ uint64_t PcieBandwidthAcc[4];
+
+ //THROTTLERS
+ uint32_t ProchotResidencyAcc;
+ uint32_t PptResidencyAcc;
+ uint32_t SocketThmResidencyAcc;
+ uint32_t VrThmResidencyAcc;
+ uint32_t HbmThmResidencyAcc;
+} MetricsTable_t;
+
+#define SMU_VF_METRICS_TABLE_VERSION 0x1
+
+typedef struct {
+ uint32_t AccumulationCounter;
+ uint32_t InstGfxclk_TargFreq;
+ uint64_t AccGfxclk_TargFreq;
+ uint64_t AccGfxRsmuDpm_Busy;
+} VfMetricsTable_t;
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
new file mode 100644
index 000000000000..b838e8db395a
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU_13_0_6_PPSMC_H
+#define SMU_13_0_6_PPSMC_H
+
+// SMU Response Codes:
+#define PPSMC_Result_OK 0x1
+#define PPSMC_Result_Failed 0xFF
+#define PPSMC_Result_UnknownCmd 0xFE
+#define PPSMC_Result_CmdRejectedPrereq 0xFD
+#define PPSMC_Result_CmdRejectedBusy 0xFC
+
+// Message Definitions:
+#define PPSMC_MSG_TestMessage 0x1
+#define PPSMC_MSG_GetSmuVersion 0x2
+#define PPSMC_MSG_GfxDriverReset 0x3
+#define PPSMC_MSG_GetDriverIfVersion 0x4
+#define PPSMC_MSG_EnableAllSmuFeatures 0x5
+#define PPSMC_MSG_DisableAllSmuFeatures 0x6
+#define PPSMC_MSG_RequestI2cTransaction 0x7
+#define PPSMC_MSG_GetMetricsVersion 0x8
+#define PPSMC_MSG_GetMetricsTable 0x9
+#define PPSMC_MSG_GetEccInfoTable 0xA
+#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xB
+#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xC
+#define PPSMC_MSG_SetDriverDramAddrHigh 0xD
+#define PPSMC_MSG_SetDriverDramAddrLow 0xE
+#define PPSMC_MSG_SetToolsDramAddrHigh 0xF
+#define PPSMC_MSG_SetToolsDramAddrLow 0x10
+#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x11
+#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x12
+#define PPSMC_MSG_SetSoftMinByFreq 0x13
+#define PPSMC_MSG_SetSoftMaxByFreq 0x14
+#define PPSMC_MSG_GetMinDpmFreq 0x15
+#define PPSMC_MSG_GetMaxDpmFreq 0x16
+#define PPSMC_MSG_GetDpmFreqByIndex 0x17
+#define PPSMC_MSG_SetPptLimit 0x18
+#define PPSMC_MSG_GetPptLimit 0x19
+#define PPSMC_MSG_DramLogSetDramAddrHigh 0x1A
+#define PPSMC_MSG_DramLogSetDramAddrLow 0x1B
+#define PPSMC_MSG_DramLogSetDramSize 0x1C
+#define PPSMC_MSG_GetDebugData 0x1D
+#define PPSMC_MSG_HeavySBR 0x1E
+#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x1F
+#define PPSMC_MSG_DFCstateControl 0x20
+#define PPSMC_MSG_GetGmiPwrDnHyst 0x21
+#define PPSMC_MSG_SetGmiPwrDnHyst 0x22
+#define PPSMC_MSG_GmiPwrDnControl 0x23
+#define PPSMC_MSG_EnterGfxoff 0x24
+#define PPSMC_MSG_ExitGfxoff 0x25
+#define PPSMC_MSG_EnableDeterminism 0x26
+#define PPSMC_MSG_DisableDeterminism 0x27
+#define PPSMC_MSG_DumpSTBtoDram 0x28
+#define PPSMC_MSG_STBtoDramLogSetDramAddrHigh 0x29
+#define PPSMC_MSG_STBtoDramLogSetDramAddrLow 0x2A
+#define PPSMC_MSG_STBtoDramLogSetDramSize 0x2B
+#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrHigh 0x2C
+#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow 0x2D
+#define PPSMC_MSG_GfxDriverResetRecovery 0x2E
+#define PPSMC_MSG_TriggerVFFLR 0x2F
+#define PPSMC_MSG_SetSoftMinGfxClk 0x30
+#define PPSMC_MSG_SetSoftMaxGfxClk 0x31
+#define PPSMC_MSG_GetMinGfxDpmFreq 0x32
+#define PPSMC_MSG_GetMaxGfxDpmFreq 0x33
+#define PPSMC_Message_Count 0x34
+
+//PPSMC Reset Types for driver msg argument
+#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
+#define PPSMC_RESET_TYPE_DRIVER_MODE_2_RESET 0x2
+#define PPSMC_RESET_TYPE_DRIVER_MODE_3_RESET 0x3
+
+typedef uint32_t PPSMC_Result;
+typedef uint32_t PPSMC_MSG;
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index 96f6c2db955b..297b70b9388f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -243,7 +243,9 @@
__SMU_DUMMY_MAP(SetNumBadMemoryPagesRetired), \
__SMU_DUMMY_MAP(SetBadMemoryPagesRetiredFlagsPerChannel), \
__SMU_DUMMY_MAP(AllowGpo), \
- __SMU_DUMMY_MAP(Mode2Reset),
+ __SMU_DUMMY_MAP(Mode2Reset), \
+ __SMU_DUMMY_MAP(RequestI2cTransaction), \
+ __SMU_DUMMY_MAP(GetMetricsTable),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 1c0ae2cb757b..0ef37837b164 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -29,11 +29,12 @@
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x37
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x08
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x37
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_6 0x0
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
@@ -244,6 +245,10 @@ int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
enum smu_clk_type clk_type,
struct smu_13_0_dpm_table *single_dpm_table);
+int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu,
+ enum smu_clk_type clk_type, uint16_t level,
+ uint32_t *value);
+
int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu);
int smu_v13_0_get_current_pcie_link_width(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 697e98a0a20a..75f18681e984 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -2143,16 +2143,9 @@ static int sienna_cichlid_set_default_od_settings(struct smu_context *smu)
(OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
OverDriveTable_t *user_od_table =
(OverDriveTable_t *)smu->smu_table.user_overdrive_table;
+ OverDriveTable_t user_od_table_bak;
int ret = 0;
- /*
- * For S3/S4/Runpm resume, no need to setup those overdrive tables again as
- * - either they already have the default OD settings got during cold bootup
- * - or they have some user customized OD settings which cannot be overwritten
- */
- if (smu->adev->in_suspend)
- return 0;
-
ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE,
0, (void *)boot_od_table, false);
if (ret) {
@@ -2163,7 +2156,23 @@ static int sienna_cichlid_set_default_od_settings(struct smu_context *smu)
sienna_cichlid_dump_od_table(smu, boot_od_table);
memcpy(od_table, boot_od_table, sizeof(OverDriveTable_t));
- memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
+
+ /*
+ * For S3/S4/Runpm resume, we need to set up those overdrive tables again,
+ * but we have to preserve the user-defined values in "user_od_table".
+ */
+ if (!smu->adev->in_suspend) {
+ memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
+ smu->user_dpm_profile.user_od = false;
+ } else if (smu->user_dpm_profile.user_od) {
+ memcpy(&user_od_table_bak, user_od_table, sizeof(OverDriveTable_t));
+ memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
+ user_od_table->GfxclkFmin = user_od_table_bak.GfxclkFmin;
+ user_od_table->GfxclkFmax = user_od_table_bak.GfxclkFmax;
+ user_od_table->UclkFmin = user_od_table_bak.UclkFmin;
+ user_od_table->UclkFmax = user_od_table_bak.UclkFmax;
+ user_od_table->VddGfxOffset = user_od_table_bak.VddGfxOffset;
+ }
return 0;
}
@@ -2373,6 +2382,20 @@ static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu,
return ret;
}
+static int sienna_cichlid_restore_user_od_settings(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTable_t *od_table = table_context->overdrive_table;
+ OverDriveTable_t *user_od_table = table_context->user_overdrive_table;
+ int res;
+
+ res = smu_v11_0_restore_user_od_settings(smu);
+ if (res == 0)
+ memcpy(od_table, user_od_table, sizeof(OverDriveTable_t));
+
+ return res;
+}
+
static int sienna_cichlid_run_btc(struct smu_context *smu)
{
int res;
@@ -4400,7 +4423,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.set_default_od_settings = sienna_cichlid_set_default_od_settings,
.od_edit_dpm_table = sienna_cichlid_od_edit_dpm_table,
- .restore_user_od_settings = smu_v11_0_restore_user_od_settings,
+ .restore_user_od_settings = sienna_cichlid_restore_user_od_settings,
.run_btc = sienna_cichlid_run_btc,
.set_power_source = smu_v11_0_set_power_source,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index cb10c7e31264..4590374251f3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -203,6 +203,8 @@ static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT]
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CAPPED, WORKLOAD_PPLIB_CAPPED_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_UNCAPPED, WORKLOAD_PPLIB_UNCAPPED_BIT),
};
static const uint8_t vangogh_throttler_map[] = {
@@ -1046,7 +1048,7 @@ static int vangogh_get_power_profile_mode(struct smu_context *smu,
if (!buf)
return -EINVAL;
- for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+ for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
/*
* Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
* Not all profile modes are supported on vangogh.
@@ -1070,7 +1072,7 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input,
int workload_type, ret;
uint32_t profile_mode = input[size];
- if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+ if (profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
return -EINVAL;
}
@@ -1590,6 +1592,21 @@ static int vangogh_read_sensor(struct smu_context *smu,
return ret;
}
+static int vangogh_get_apu_thermal_limit(struct smu_context *smu, uint32_t *limit)
+{
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetThermalLimit,
+ 0, limit);
+}
+
+static int vangogh_set_apu_thermal_limit(struct smu_context *smu, uint32_t limit)
+{
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetReducedThermalLimit,
+ limit, NULL);
+}
+
static int vangogh_set_watermarks_table(struct smu_context *smu,
struct pp_smu_wm_range_sets *clock_ranges)
{
@@ -2425,6 +2442,8 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.dpm_set_jpeg_enable = vangogh_dpm_set_jpeg_enable,
.is_dpm_running = vangogh_is_dpm_running,
.read_sensor = vangogh_read_sensor,
+ .get_apu_thermal_limit = vangogh_get_apu_thermal_limit,
+ .set_apu_thermal_limit = vangogh_set_apu_thermal_limit,
.get_enabled_mask = smu_cmn_get_enabled_mask,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_watermarks_table = vangogh_set_watermarks_table,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
index 9043f6ef1aee..7f3493b6c53c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
@@ -24,7 +24,7 @@
# It provides the smu management services for the driver.
SMU13_MGR = smu_v13_0.o aldebaran_ppt.o yellow_carp_ppt.o smu_v13_0_0_ppt.o smu_v13_0_4_ppt.o \
- smu_v13_0_5_ppt.o smu_v13_0_7_ppt.o
+ smu_v13_0_5_ppt.o smu_v13_0_7_ppt.o smu_v13_0_6_ppt.o
AMD_SWSMU_SMU13MGR = $(addprefix $(AMD_SWSMU_PATH)/smu13/,$(SMU13_MGR))
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index a52ed0580fd7..73175c993da9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -294,6 +294,10 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
case IP_VERSION(13, 0, 5):
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_5;
break;
+ case IP_VERSION(13, 0, 6):
+ smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_6;
+ adev->pm.fw_version = smu_version;
+ break;
default:
dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
adev->ip_versions[MP1_HWIP][0]);
@@ -1914,10 +1918,9 @@ int smu_v13_0_set_power_source(struct smu_context *smu,
NULL);
}
-static int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint16_t level,
- uint32_t *value)
+int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu,
+ enum smu_clk_type clk_type, uint16_t level,
+ uint32_t *value)
{
int ret = 0, clk_id = 0;
uint32_t param;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 923a9fb3c887..e9766fe5656e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -46,6 +46,7 @@
#include "asic_reg/mp/mp_13_0_0_sh_mask.h"
#include "smu_cmn.h"
#include "amdgpu_ras.h"
+#include "umc_v8_10.h"
/*
* DO NOT use these for err/warn/info/debug messages.
@@ -90,6 +91,12 @@
#define DEBUGSMC_MSG_Mode1Reset 2
+/*
+ * SMU_v13_0_10 supports ECCTABLE since version 80.34.0,
+ * use this to check whether the ECCTABLE feature is supported
+ */
+#define SUPPORT_ECCTABLE_SMU_13_0_10_VERSION 0x00502200
+
static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -229,6 +236,7 @@ static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(ACTIVITY_MONITOR_COEFF),
[SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
TAB_MAP(I2C_COMMANDS),
+ TAB_MAP(ECCINFO),
};
static struct cmn2asic_mapping smu_v13_0_0_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -462,6 +470,8 @@ static int smu_v13_0_0_tables_init(struct smu_context *smu)
AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
if (!smu_table->metrics_table)
@@ -477,8 +487,14 @@ static int smu_v13_0_0_tables_init(struct smu_context *smu)
if (!smu_table->watermarks_table)
goto err2_out;
+ smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
+ if (!smu_table->ecc_table)
+ goto err3_out;
+
return 0;
+err3_out:
+ kfree(smu_table->watermarks_table);
err2_out:
kfree(smu_table->gpu_metrics_table);
err1_out:
@@ -1571,7 +1587,9 @@ static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu,
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
i);
- if (workload_type < 0)
+ if (workload_type == -ENOTSUPP)
+ continue;
+ else if (workload_type < 0)
return -EINVAL;
result = smu_cmn_update_table(smu,
@@ -2036,6 +2054,64 @@ static int smu_v13_0_0_send_bad_mem_channel_flag(struct smu_context *smu,
return ret;
}
+static int smu_v13_0_0_check_ecc_table_support(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t if_version = 0xff, smu_version = 0xff;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
+ if (ret)
+ return -EOPNOTSUPP;
+
+ if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) &&
+ (smu_version >= SUPPORT_ECCTABLE_SMU_13_0_10_VERSION))
+ return ret;
+ else
+ return -EOPNOTSUPP;
+}
+
+static ssize_t smu_v13_0_0_get_ecc_info(struct smu_context *smu,
+ void *table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct amdgpu_device *adev = smu->adev;
+ EccInfoTable_t *ecc_table = NULL;
+ struct ecc_info_per_ch *ecc_info_per_channel = NULL;
+ int i, ret = 0;
+ struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table;
+
+ ret = smu_v13_0_0_check_ecc_table_support(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ECCINFO,
+ 0,
+ smu_table->ecc_table,
+ false);
+ if (ret) {
+ dev_info(adev->dev, "Failed to export SMU ecc table!\n");
+ return ret;
+ }
+
+ ecc_table = (EccInfoTable_t *)smu_table->ecc_table;
+
+ for (i = 0; i < UMC_V8_10_TOTAL_CHANNEL_NUM(adev); i++) {
+ ecc_info_per_channel = &(eccinfo->ecc[i]);
+ ecc_info_per_channel->ce_count_lo_chip =
+ ecc_table->EccInfo[i].ce_count_lo_chip;
+ ecc_info_per_channel->ce_count_hi_chip =
+ ecc_table->EccInfo[i].ce_count_hi_chip;
+ ecc_info_per_channel->mca_umc_status =
+ ecc_table->EccInfo[i].mca_umc_status;
+ ecc_info_per_channel->mca_umc_addr =
+ ecc_table->EccInfo[i].mca_umc_addr;
+ }
+
+ return ret;
+}
+
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -2111,6 +2187,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.send_hbm_bad_pages_num = smu_v13_0_0_smu_send_bad_mem_page_num,
.send_hbm_bad_channel_flag = smu_v13_0_0_send_bad_mem_channel_flag,
.gpo_control = smu_v13_0_gpo_control,
+ .get_ecc_info = smu_v13_0_0_get_ecc_info,
};
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
new file mode 100644
index 000000000000..ea8f3d6fb98b
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -0,0 +1,2069 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define SWSMU_CODE_LAYER_L2
+
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "atomfirmware.h"
+#include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
+#include "smu_v13_0_6_pmfw.h"
+#include "smu13_driver_if_v13_0_6.h"
+#include "smu_v13_0_6_ppsmc.h"
+#include "soc15_common.h"
+#include "atom.h"
+#include "power_state.h"
+#include "smu_v13_0.h"
+#include "smu_v13_0_6_ppt.h"
+#include "nbio/nbio_7_4_offset.h"
+#include "nbio/nbio_7_4_sh_mask.h"
+#include "thm/thm_11_0_2_offset.h"
+#include "thm/thm_11_0_2_sh_mask.h"
+#include "amdgpu_xgmi.h"
+#include <linux/pci.h>
+#include "amdgpu_ras.h"
+#include "smu_cmn.h"
+#include "mp/mp_13_0_6_offset.h"
+#include "mp/mp_13_0_6_sh_mask.h"
+
+#undef MP1_Public
+#undef smnMP1_FIRMWARE_FLAGS
+
+/* TODO: Check final register offsets */
+#define MP1_Public 0x03b00000
+#define smnMP1_FIRMWARE_FLAGS 0x3010028
+/*
+ * DO NOT use these for err/warn/info/debug messages.
+ * Use dev_err, dev_warn, dev_info and dev_dbg instead.
+ * They are more MGPU friendly.
+ */
+#undef pr_err
+#undef pr_warn
+#undef pr_info
+#undef pr_debug
+
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
+
+#define SMU_13_0_6_FEA_MAP(smu_feature, smu_13_0_6_feature) \
+ [smu_feature] = { 1, (smu_13_0_6_feature) }
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+#define SMC_DPM_FEATURE \
+ (FEATURE_MASK(FEATURE_DATA_CALCULATION) | \
+ FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_UCLK) | \
+ FEATURE_MASK(FEATURE_DPM_SOCCLK) | FEATURE_MASK(FEATURE_DPM_FCLK) | \
+ FEATURE_MASK(FEATURE_DPM_LCLK) | FEATURE_MASK(FEATURE_DPM_XGMI) | \
+ FEATURE_MASK(FEATURE_DPM_VCN))
+
+/* possible frequency drift (1 MHz) */
+#define EPSILON 1
+
+#define smnPCIE_ESM_CTRL 0x111003D0
+
+static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = {
+ MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
+ MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
+ MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
+ MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 1),
+ MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 1),
+ MSG_MAP(RequestI2cTransaction, PPSMC_MSG_RequestI2cTransaction, 0),
+ MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1),
+ MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1),
+ MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1),
+ MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
+ MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
+ MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
+ MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
+ MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0),
+ MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0),
+ MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 0),
+ MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 0),
+ MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
+ MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
+ MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, 0),
+ MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
+ MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
+ MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
+ MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData, 0),
+ MSG_MAP(SetNumBadHbmPagesRetired, PPSMC_MSG_SetNumBadHbmPagesRetired, 0),
+ MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl, 0),
+ MSG_MAP(GetGmiPwrDnHyst, PPSMC_MSG_GetGmiPwrDnHyst, 0),
+ MSG_MAP(SetGmiPwrDnHyst, PPSMC_MSG_SetGmiPwrDnHyst, 0),
+ MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl, 0),
+ MSG_MAP(EnterGfxoff, PPSMC_MSG_EnterGfxoff, 0),
+ MSG_MAP(ExitGfxoff, PPSMC_MSG_ExitGfxoff, 0),
+ MSG_MAP(EnableDeterminism, PPSMC_MSG_EnableDeterminism, 0),
+ MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0),
+ MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
+ MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 0),
+ MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 0),
+ MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 0),
+ MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0),
+};
+
+static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = {
+ CLK_MAP(SOCCLK, PPCLK_SOCCLK),
+ CLK_MAP(FCLK, PPCLK_FCLK),
+ CLK_MAP(UCLK, PPCLK_UCLK),
+ CLK_MAP(MCLK, PPCLK_UCLK),
+ CLK_MAP(DCLK, PPCLK_DCLK),
+ CLK_MAP(VCLK, PPCLK_VCLK),
+ CLK_MAP(LCLK, PPCLK_LCLK),
+};
+
+static const struct cmn2asic_mapping smu_v13_0_6_feature_mask_map[SMU_FEATURE_COUNT] = {
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_DATA_CALCULATION),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, FEATURE_DPM_GFXCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_UCLK_BIT, FEATURE_DPM_UCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_SOCCLK_BIT, FEATURE_DPM_SOCCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, FEATURE_DPM_FCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_LCLK_BIT, FEATURE_DPM_LCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_VCLK_BIT, FEATURE_DPM_VCN),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_DCLK_BIT, FEATURE_DPM_VCN),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_XGMI_BIT, FEATURE_DPM_XGMI),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, FEATURE_DS_GFXCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, FEATURE_DS_SOCCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, FEATURE_DS_LCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, FEATURE_DS_FCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_VCN_DPM_BIT, FEATURE_DPM_VCN),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_PPT_BIT, FEATURE_PPT),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_TDC_BIT, FEATURE_TDC),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT, FEATURE_APCC_DFLL),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, FEATURE_SMU_CG),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_GFXOFF_BIT, FEATURE_GFXOFF),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DOWN),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE),
+};
+
+#define TABLE_PMSTATUSLOG 0
+#define TABLE_SMU_METRICS 1
+#define TABLE_I2C_COMMANDS 2
+#define TABLE_COUNT 3
+
+static const struct cmn2asic_mapping smu_v13_0_6_table_map[SMU_TABLE_COUNT] = {
+ TAB_MAP(PMSTATUSLOG),
+ TAB_MAP(SMU_METRICS),
+ TAB_MAP(I2C_COMMANDS),
+};
+
+#define THROTTLER_PROCHOT_GFX_BIT 0
+#define THROTTLER_PPT_BIT 1
+#define THROTTLER_TEMP_SOC_BIT 2
+#define THROTTLER_TEMP_VR_GFX_BIT 3
+#define THROTTLER_TEMP_HBM_BIT 4
+
+static const uint8_t smu_v13_0_6_throttler_map[] = {
+ [THROTTLER_PPT_BIT] = (SMU_THROTTLER_PPT0_BIT),
+ [THROTTLER_TEMP_SOC_BIT] = (SMU_THROTTLER_TEMP_GPU_BIT),
+ [THROTTLER_TEMP_HBM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
+ [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
+ [THROTTLER_PROCHOT_GFX_BIT] = (SMU_THROTTLER_PROCHOT_GFX_BIT),
+};
+
+struct PPTable_t {
+ uint32_t MaxSocketPowerLimit;
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t FclkFrequencyTable[4];
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequencyTable[4];
+ uint32_t VclkFrequencyTable[4];
+ uint32_t DclkFrequencyTable[4];
+ uint32_t LclkFrequencyTable[4];
+ uint32_t MaxLclkDpmRange;
+ uint32_t MinLclkDpmRange;
+ bool Init;
+};
+
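+/*
+ * Metrics values consumed below are in Q10 fixed point (10 fractional bits);
+ * this drops the fractional part, e.g. a raw value of 2048 converts to 2.
+ */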
+#define SMUQ10_TO_UINT(x) ((x) >> 10)
+
+struct smu_v13_0_6_dpm_map {
+ enum smu_clk_type clk_type;
+ uint32_t feature_num;
+ struct smu_13_0_dpm_table *dpm_table;
+ uint32_t *freq_table;
+};
+
+static int smu_v13_0_6_tables_init(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!(adev->flags & AMD_IS_APU))
+ SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(MetricsTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ smu_table->metrics_table = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
+ if (!smu_table->metrics_table)
+ return -ENOMEM;
+ smu_table->metrics_time = 0;
+
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
+ smu_table->gpu_metrics_table =
+ kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+ if (!smu_table->gpu_metrics_table) {
+ kfree(smu_table->metrics_table);
+ return -ENOMEM;
+ }
+
+ smu_table->driver_pptable =
+ kzalloc(sizeof(struct PPTable_t), GFP_KERNEL);
+ if (!smu_table->driver_pptable) {
+ kfree(smu_table->metrics_table);
+ kfree(smu_table->gpu_metrics_table);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int smu_v13_0_6_allocate_dpm_context(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+ smu_dpm->dpm_context =
+ kzalloc(sizeof(struct smu_13_0_dpm_context), GFP_KERNEL);
+ if (!smu_dpm->dpm_context)
+ return -ENOMEM;
+ smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);
+
+ return 0;
+}
+
+static int smu_v13_0_6_init_smc_tables(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_v13_0_6_tables_init(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_v13_0_6_allocate_dpm_context(smu);
+
+ return ret;
+}
+
+static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu,
+ uint32_t *feature_mask,
+ uint32_t num)
+{
+ if (num > 2)
+ return -EINVAL;
+
+ /* pptable will handle the features to enable */
+ memset(feature_mask, 0xFF, sizeof(uint32_t) * num);
+
+ return 0;
+}
+
+static int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
+ void *metrics_table, bool bypass_cache)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
+ struct smu_table *table = &smu_table->driver_table;
+ int ret;
+
+ if (bypass_cache || !smu_table->metrics_time ||
+ time_after(jiffies,
+ smu_table->metrics_time + msecs_to_jiffies(1))) {
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsTable, NULL);
+ if (ret) {
+ dev_info(smu->adev->dev,
+ "Failed to export SMU metrics table!\n");
+ return ret;
+ }
+
+ amdgpu_asic_invalidate_hdp(smu->adev, NULL);
+ memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
+
+ smu_table->metrics_time = jiffies;
+ }
+
+ if (metrics_table)
+ memcpy(metrics_table, smu_table->metrics_table, table_size);
+
+ return 0;
+}
+
+static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ int ret;
+ int i;
+
+ /* Store one-time values in driver PPTable */
+ if (!pptable->Init) {
+ ret = smu_v13_0_6_get_metrics_table(smu, NULL, false);
+ if (ret)
+ return ret;
+
+ pptable->MaxSocketPowerLimit =
+ SMUQ10_TO_UINT(metrics->MaxSocketPowerLimit);
+ pptable->MaxGfxclkFrequency =
+ SMUQ10_TO_UINT(metrics->MaxGfxclkFrequency);
+ pptable->MinGfxclkFrequency =
+ SMUQ10_TO_UINT(metrics->MinGfxclkFrequency);
+
+ for (i = 0; i < 4; ++i) {
+ pptable->FclkFrequencyTable[i] =
+ SMUQ10_TO_UINT(metrics->FclkFrequencyTable[i]);
+ pptable->UclkFrequencyTable[i] =
+ SMUQ10_TO_UINT(metrics->UclkFrequencyTable[i]);
+ pptable->SocclkFrequencyTable[i] = SMUQ10_TO_UINT(
+ metrics->SocclkFrequencyTable[i]);
+ pptable->VclkFrequencyTable[i] =
+ SMUQ10_TO_UINT(metrics->VclkFrequencyTable[i]);
+ pptable->DclkFrequencyTable[i] =
+ SMUQ10_TO_UINT(metrics->DclkFrequencyTable[i]);
+ pptable->LclkFrequencyTable[i] =
+ SMUQ10_TO_UINT(metrics->LclkFrequencyTable[i]);
+ }
+
+ pptable->Init = true;
+ }
+
+ return 0;
+}
+
+static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ uint32_t clock_limit = 0, param;
+ int ret = 0, clk_id = 0;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ if (pptable->Init)
+ clock_limit = pptable->UclkFrequencyTable[0];
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ if (pptable->Init)
+ clock_limit = pptable->MinGfxclkFrequency;
+ break;
+ case SMU_SOCCLK:
+ if (pptable->Init)
+ clock_limit = pptable->UclkFrequencyTable[0];
+ break;
+ case SMU_FCLK:
+ if (pptable->Init)
+ clock_limit = pptable->FclkFrequencyTable[0];
+ break;
+ case SMU_VCLK:
+ if (pptable->Init)
+ clock_limit = pptable->VclkFrequencyTable[0];
+ break;
+ case SMU_DCLK:
+ if (pptable->Init)
+ clock_limit = pptable->DclkFrequencyTable[0];
+ break;
+ default:
+ break;
+ }
+
+ if (min)
+ *min = clock_limit;
+
+ if (max)
+ *max = clock_limit;
+
+ return 0;
+ }
+
+ if (!(clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)) {
+ clk_id = smu_cmn_to_asic_specific_index(
+ smu, CMN2ASIC_MAPPING_CLK, clk_type);
+ if (clk_id < 0) {
+ ret = -EINVAL;
+ goto failed;
+ }
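+ /* The message parameter carries the clock ID in its upper 16 bits */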
+ param = (clk_id & 0xffff) << 16;
+ }
+
+ if (max) {
+ if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)
+ ret = smu_cmn_send_smc_msg(
+ smu, SMU_MSG_GetMaxGfxclkFrequency, max);
+ else
+ ret = smu_cmn_send_smc_msg_with_param(
+ smu, SMU_MSG_GetMaxDpmFreq, param, max);
+ if (ret)
+ goto failed;
+ }
+
+ if (min) {
+ if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)
+ ret = smu_cmn_send_smc_msg(
+ smu, SMU_MSG_GetMinGfxclkFrequency, min);
+ else
+ ret = smu_cmn_send_smc_msg_with_param(
+ smu, SMU_MSG_GetMinDpmFreq, param, min);
+ }
+
+failed:
+ return ret;
+}
+
+static int smu_v13_0_6_get_dpm_level_count(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *levels)
+{
+ int ret;
+
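+ /* Query with index 0xff to get the highest DPM level; the value is 0-based, so add one for the level count */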
+ ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, levels);
+ if (!ret)
+ ++(*levels);
+
+ return ret;
+}
+
+static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_13_0_dpm_table *dpm_table = NULL;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ uint32_t gfxclkmin, gfxclkmax, levels;
+ int ret = 0, i, j;
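+ /* Per-clock mapping: DPM feature bit to check, DPM table to populate, and the frequency table read from metrics */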
+ struct smu_v13_0_6_dpm_map dpm_map[] = {
+ { SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT,
+ &dpm_context->dpm_tables.soc_table,
+ pptable->SocclkFrequencyTable },
+ { SMU_UCLK, SMU_FEATURE_DPM_UCLK_BIT,
+ &dpm_context->dpm_tables.uclk_table,
+ pptable->UclkFrequencyTable },
+ { SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT,
+ &dpm_context->dpm_tables.fclk_table,
+ pptable->FclkFrequencyTable },
+ { SMU_VCLK, SMU_FEATURE_DPM_VCLK_BIT,
+ &dpm_context->dpm_tables.vclk_table,
+ pptable->VclkFrequencyTable },
+ { SMU_DCLK, SMU_FEATURE_DPM_DCLK_BIT,
+ &dpm_context->dpm_tables.dclk_table,
+ pptable->DclkFrequencyTable },
+ };
+
+ ret = smu_v13_0_6_setup_driver_pptable(smu);
+ if (ret)
+ return ret;
+
+ /* gfxclk dpm table setup */
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
+ /* In the case of gfxclk, only fine-grained dpm is honored.
+ * Get min/max values from FW.
+ */
+ ret = smu_v13_0_6_get_dpm_ultimate_freq(smu, SMU_GFXCLK,
+ &gfxclkmin, &gfxclkmax);
+ if (ret)
+ return ret;
+
+ dpm_table->count = 2;
+ dpm_table->dpm_levels[0].value = gfxclkmin;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->dpm_levels[1].value = gfxclkmax;
+ dpm_table->dpm_levels[1].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[1].value;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = pptable->MinGfxclkFrequency;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[0].value;
+ }
+
+ for (j = 0; j < ARRAY_SIZE(dpm_map); j++) {
+ dpm_table = dpm_map[j].dpm_table;
+ levels = 1;
+ if (smu_cmn_feature_is_enabled(smu, dpm_map[j].feature_num)) {
+ ret = smu_v13_0_6_get_dpm_level_count(
+ smu, dpm_map[j].clk_type, &levels);
+ if (ret)
+ return ret;
+ }
+ dpm_table->count = levels;
+ for (i = 0; i < dpm_table->count; ++i) {
+ dpm_table->dpm_levels[i].value =
+ dpm_map[j].freq_table[i];
+ dpm_table->dpm_levels[i].enabled = true;
+ }
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[levels - 1].value;
+ }
+
+ return 0;
+}
+
+static int smu_v13_0_6_setup_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+
+ /* TODO: PPTable is not available.
+ * 1) Find an alternate way to get 'PPTable values' here.
+ * 2) Check if there is SW CTF
+ */
+ table_context->thermal_controller_type = 0;
+
+ return 0;
+}
+
+static int smu_v13_0_6_check_fw_status(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t mp1_fw_flags;
+
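+ /* MP1 firmware is considered up once it reports its interrupts as enabled */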
+ mp1_fw_flags =
+ RREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+
+ if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
+ MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
+ return 0;
+
+ return -EIO;
+}
+
+static int smu_v13_0_6_populate_umd_state_clk(struct smu_context *smu)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_dpm_table *gfx_table =
+ &dpm_context->dpm_tables.gfx_table;
+ struct smu_13_0_dpm_table *mem_table =
+ &dpm_context->dpm_tables.uclk_table;
+ struct smu_13_0_dpm_table *soc_table =
+ &dpm_context->dpm_tables.soc_table;
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+
+ pstate_table->gfxclk_pstate.min = gfx_table->min;
+ pstate_table->gfxclk_pstate.peak = gfx_table->max;
+ pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
+ pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
+
+ pstate_table->uclk_pstate.min = mem_table->min;
+ pstate_table->uclk_pstate.peak = mem_table->max;
+ pstate_table->uclk_pstate.curr.min = mem_table->min;
+ pstate_table->uclk_pstate.curr.max = mem_table->max;
+
+ pstate_table->socclk_pstate.min = soc_table->min;
+ pstate_table->socclk_pstate.peak = soc_table->max;
+ pstate_table->socclk_pstate.curr.min = soc_table->min;
+ pstate_table->socclk_pstate.curr.max = soc_table->max;
+
+ if (gfx_table->count > SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL &&
+ mem_table->count > SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL &&
+ soc_table->count > SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL) {
+ pstate_table->gfxclk_pstate.standard =
+ gfx_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL].value;
+ pstate_table->uclk_pstate.standard =
+ mem_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL].value;
+ pstate_table->socclk_pstate.standard =
+ soc_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL].value;
+ } else {
+ pstate_table->gfxclk_pstate.standard =
+ pstate_table->gfxclk_pstate.min;
+ pstate_table->uclk_pstate.standard =
+ pstate_table->uclk_pstate.min;
+ pstate_table->socclk_pstate.standard =
+ pstate_table->socclk_pstate.min;
+ }
+
+ return 0;
+}
+
+static int smu_v13_0_6_get_clk_table(struct smu_context *smu,
+ struct pp_clock_levels_with_latency *clocks,
+ struct smu_13_0_dpm_table *dpm_table)
+{
+ int i, count;
+
+ count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS :
+ dpm_table->count;
+ clocks->num_levels = count;
+
+ for (i = 0; i < count; i++) {
+ clocks->data[i].clocks_in_khz =
+ dpm_table->dpm_levels[i].value * 1000;
+ clocks->data[i].latency_in_us = 0;
+ }
+
+ return 0;
+}
+
+static int smu_v13_0_6_freqs_in_same_level(int32_t frequency1,
+ int32_t frequency2)
+{
+ return (abs(frequency1 - frequency2) <= EPSILON);
+}
+
+static uint32_t smu_v13_0_6_get_throttler_status(struct smu_context *smu,
+ MetricsTable_t *metrics)
+{
+ uint32_t throttler_status = 0;
+
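+ /* A non-zero residency accumulator means the corresponding throttler has been active */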
+ throttler_status |= metrics->ProchotResidencyAcc > 0 ? 1U << THROTTLER_PROCHOT_GFX_BIT : 0;
+ throttler_status |= metrics->PptResidencyAcc > 0 ? 1U << THROTTLER_PPT_BIT : 0;
+ throttler_status |= metrics->SocketThmResidencyAcc > 0 ? 1U << THROTTLER_TEMP_SOC_BIT : 0;
+ throttler_status |= metrics->VrThmResidencyAcc > 0 ? 1U << THROTTLER_TEMP_VR_GFX_BIT : 0;
+ throttler_status |= metrics->HbmThmResidencyAcc > 0 ? 1U << THROTTLER_TEMP_HBM_BIT : 0;
+
+ return throttler_status;
+}
+
+static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member,
+ uint32_t *value)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+ int ret = 0;
+
+ ret = smu_v13_0_6_get_metrics_table(smu, NULL, false);
+ if (ret)
+ return ret;
+
+ /* For clocks with multiple instances, only report the first one */
+ switch (member) {
+ case METRICS_CURR_GFXCLK:
+ case METRICS_AVERAGE_GFXCLK:
+ *value = 0;
+ break;
+ case METRICS_CURR_SOCCLK:
+ case METRICS_AVERAGE_SOCCLK:
+ *value = SMUQ10_TO_UINT(metrics->SocclkFrequency[0]);
+ break;
+ case METRICS_CURR_UCLK:
+ case METRICS_AVERAGE_UCLK:
+ *value = SMUQ10_TO_UINT(metrics->UclkFrequency);
+ break;
+ case METRICS_CURR_VCLK:
+ *value = SMUQ10_TO_UINT(metrics->VclkFrequency[0]);
+ break;
+ case METRICS_CURR_DCLK:
+ *value = SMUQ10_TO_UINT(metrics->DclkFrequency[0]);
+ break;
+ case METRICS_CURR_FCLK:
+ *value = SMUQ10_TO_UINT(metrics->FclkFrequency);
+ break;
+ case METRICS_AVERAGE_GFXACTIVITY:
+ *value = SMUQ10_TO_UINT(metrics->SocketGfxBusy);
+ break;
+ case METRICS_AVERAGE_MEMACTIVITY:
+ *value = SMUQ10_TO_UINT(metrics->DramBandwidthUtilization);
+ break;
+ case METRICS_AVERAGE_SOCKETPOWER:
+ *value = SMUQ10_TO_UINT(metrics->SocketPower) << 8;
+ break;
+ case METRICS_TEMPERATURE_HOTSPOT:
+ *value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature);
+ break;
+ case METRICS_TEMPERATURE_MEM:
+ *value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature);
+ break;
+ /* This is the max of all VRs and not just SOC VR.
+ * No need to define another data type for the same.
+ */
+ case METRICS_TEMPERATURE_VRSOC:
+ *value = SMUQ10_TO_UINT(metrics->MaxVrTemperature);
+ break;
+ case METRICS_THROTTLER_STATUS:
+ *value = smu_v13_0_6_get_throttler_status(smu, metrics);
+ break;
+ default:
+ *value = UINT_MAX;
+ break;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_get_current_clk_freq_by_table(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ MetricsMember_t member_type;
+
+ if (!value)
+ return -EINVAL;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ member_type = METRICS_CURR_GFXCLK;
+ break;
+ case SMU_UCLK:
+ member_type = METRICS_CURR_UCLK;
+ break;
+ case SMU_SOCCLK:
+ member_type = METRICS_CURR_SOCCLK;
+ break;
+ case SMU_VCLK:
+ member_type = METRICS_CURR_VCLK;
+ break;
+ case SMU_DCLK:
+ member_type = METRICS_CURR_DCLK;
+ break;
+ case SMU_FCLK:
+ member_type = METRICS_CURR_FCLK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return smu_v13_0_6_get_smu_metrics_data(smu, member_type, value);
+}
+
+static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
+ enum smu_clk_type type, char *buf)
+{
+ int i, now, size = 0;
+ int ret = 0;
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ struct pp_clock_levels_with_latency clocks;
+ struct smu_13_0_dpm_table *single_dpm_table;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_13_0_dpm_context *dpm_context = NULL;
+ uint32_t display_levels;
+ uint32_t freq_values[3] = { 0 };
+ uint32_t min_clk, max_clk;
+
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
+ if (amdgpu_ras_intr_triggered()) {
+ size += sysfs_emit_at(buf, size, "unavailable\n");
+ return size;
+ }
+
+ dpm_context = smu_dpm->dpm_context;
+
+ switch (type) {
+ case SMU_OD_SCLK:
+ size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
+ fallthrough;
+ case SMU_SCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_GFXCLK,
+ &now);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get current gfx clk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
+ ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get gfx clk levels Failed!");
+ return ret;
+ }
+
+ display_levels = clocks.num_levels;
+
+ min_clk = pstate_table->gfxclk_pstate.curr.min;
+ max_clk = pstate_table->gfxclk_pstate.curr.max;
+
+ freq_values[0] = min_clk;
+ freq_values[1] = max_clk;
+
+ /* fine-grained dpm has only 2 levels */
+ if (now > min_clk && now < max_clk) {
+ display_levels = clocks.num_levels + 1;
+ freq_values[2] = max_clk;
+ freq_values[1] = now;
+ }
+
+ /*
+ * For DPM disabled case, there will be only one clock level.
+ * And it's safe to assume that is always the current clock.
+ */
+ if (display_levels == clocks.num_levels) {
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sysfs_emit_at(
+ buf, size, "%d: %uMhz %s\n", i,
+ freq_values[i],
+ (clocks.num_levels == 1) ?
+ "*" :
+ (smu_v13_0_6_freqs_in_same_level(
+ freq_values[i], now) ?
+ "*" :
+ ""));
+ } else {
+ for (i = 0; i < display_levels; i++)
+ size += sysfs_emit_at(buf, size,
+ "%d: %uMhz %s\n", i,
+ freq_values[i],
+ i == 1 ? "*" : "");
+ }
+
+ break;
+
+ case SMU_OD_MCLK:
+ size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
+ fallthrough;
+ case SMU_MCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_UCLK,
+ &now);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get current mclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
+ ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get memory clk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sysfs_emit_at(
+ buf, size, "%d: %uMhz %s\n", i,
+ clocks.data[i].clocks_in_khz / 1000,
+ (clocks.num_levels == 1) ?
+ "*" :
+ (smu_v13_0_6_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz /
+ 1000,
+ now) ?
+ "*" :
+ ""));
+ break;
+
+ case SMU_SOCCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_SOCCLK,
+ &now);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get current socclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_context->dpm_tables.soc_table);
+ ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get socclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sysfs_emit_at(
+ buf, size, "%d: %uMhz %s\n", i,
+ clocks.data[i].clocks_in_khz / 1000,
+ (clocks.num_levels == 1) ?
+ "*" :
+ (smu_v13_0_6_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz /
+ 1000,
+ now) ?
+ "*" :
+ ""));
+ break;
+
+ case SMU_FCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_FCLK,
+ &now);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get current fclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
+ ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get fclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < single_dpm_table->count; i++)
+ size += sysfs_emit_at(
+ buf, size, "%d: %uMhz %s\n", i,
+ single_dpm_table->dpm_levels[i].value,
+ (clocks.num_levels == 1) ?
+ "*" :
+ (smu_v13_0_6_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz /
+ 1000,
+ now) ?
+ "*" :
+ ""));
+ break;
+
+ case SMU_VCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_VCLK,
+ &now);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get current vclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
+ ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get vclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < single_dpm_table->count; i++)
+ size += sysfs_emit_at(
+ buf, size, "%d: %uMhz %s\n", i,
+ single_dpm_table->dpm_levels[i].value,
+ (clocks.num_levels == 1) ?
+ "*" :
+ (smu_v13_0_6_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz /
+ 1000,
+ now) ?
+ "*" :
+ ""));
+ break;
+
+ case SMU_DCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_DCLK,
+ &now);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get current dclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
+ ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get dclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < single_dpm_table->count; i++)
+ size += sysfs_emit_at(
+ buf, size, "%d: %uMhz %s\n", i,
+ single_dpm_table->dpm_levels[i].value,
+ (clocks.num_levels == 1) ?
+ "*" :
+ (smu_v13_0_6_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz /
+ 1000,
+ now) ?
+ "*" :
+ ""));
+ break;
+
+ default:
+ break;
+ }
+
+ return size;
+}
+
+static int smu_v13_0_6_upload_dpm_level(struct smu_context *smu, bool max,
+ uint32_t feature_mask, uint32_t level)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ uint32_t freq;
+ int ret = 0;
+
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
+ (feature_mask & FEATURE_MASK(FEATURE_DPM_GFXCLK))) {
+ freq = dpm_context->dpm_tables.gfx_table.dpm_levels[level].value;
+ ret = smu_cmn_send_smc_msg_with_param(
+ smu,
+ (max ? SMU_MSG_SetSoftMaxGfxClk :
+ SMU_MSG_SetSoftMinGfxclk),
+ freq & 0xffff, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Failed to set soft %s gfxclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
+ (feature_mask & FEATURE_MASK(FEATURE_DPM_UCLK))) {
+ freq = dpm_context->dpm_tables.uclk_table.dpm_levels[level]
+ .value;
+ ret = smu_cmn_send_smc_msg_with_param(
+ smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq :
+ SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_UCLK << 16) | (freq & 0xffff), NULL);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Failed to set soft %s memclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
+ (feature_mask & FEATURE_MASK(FEATURE_DPM_SOCCLK))) {
+ freq = dpm_context->dpm_tables.soc_table.dpm_levels[level].value;
+ ret = smu_cmn_send_smc_msg_with_param(
+ smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq :
+ SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_SOCCLK << 16) | (freq & 0xffff), NULL);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Failed to set soft %s socclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type type, uint32_t mask)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_dpm_table *single_dpm_table = NULL;
+ uint32_t soft_min_level, soft_max_level;
+ int ret = 0;
+
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ switch (type) {
+ case SMU_SCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
+ if (soft_max_level >= single_dpm_table->count) {
+ dev_err(smu->adev->dev,
+ "Clock level specified %d is over max allowed %d\n",
+ soft_max_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = smu_v13_0_6_upload_dpm_level(
+ smu, false, FEATURE_MASK(FEATURE_DPM_GFXCLK),
+ soft_min_level);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Failed to upload boot level to lowest!\n");
+ break;
+ }
+
+ ret = smu_v13_0_6_upload_dpm_level(
+ smu, true, FEATURE_MASK(FEATURE_DPM_GFXCLK),
+ soft_max_level);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "Failed to upload dpm max level to highest!\n");
+
+ break;
+
+ case SMU_MCLK:
+ case SMU_SOCCLK:
+ case SMU_FCLK:
+ /*
+ * Should not arrive here since smu_13_0_6 does not
+ * support mclk/socclk/fclk softmin/softmax settings
+ */
+ ret = -EINVAL;
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_get_current_activity_percent(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value)
+{
+ int ret = 0;
+
+ if (!value)
+ return -EINVAL;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ ret = smu_v13_0_6_get_smu_metrics_data(
+ smu, METRICS_AVERAGE_GFXACTIVITY, value);
+ break;
+ case AMDGPU_PP_SENSOR_MEM_LOAD:
+ ret = smu_v13_0_6_get_smu_metrics_data(
+ smu, METRICS_AVERAGE_MEMACTIVITY, value);
+ break;
+ default:
+ dev_err(smu->adev->dev,
+ "Invalid sensor for retrieving clock activity\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_get_gpu_power(struct smu_context *smu, uint32_t *value)
+{
+ if (!value)
+ return -EINVAL;
+
+ return smu_v13_0_6_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER,
+ value);
+}
+
+static int smu_v13_0_6_thermal_get_temperature(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value)
+{
+ int ret = 0;
+
+ if (!value)
+ return -EINVAL;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ ret = smu_v13_0_6_get_smu_metrics_data(
+ smu, METRICS_TEMPERATURE_HOTSPOT, value);
+ break;
+ case AMDGPU_PP_SENSOR_MEM_TEMP:
+ ret = smu_v13_0_6_get_smu_metrics_data(
+ smu, METRICS_TEMPERATURE_MEM, value);
+ break;
+ default:
+ dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor, void *data,
+ uint32_t *size)
+{
+ int ret = 0;
+
+ if (amdgpu_ras_intr_triggered())
+ return 0;
+
+ if (!data || !size)
+ return -EINVAL;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_MEM_LOAD:
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ ret = smu_v13_0_6_get_current_activity_percent(smu, sensor,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_POWER:
+ ret = smu_v13_0_6_get_gpu_power(smu, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ case AMDGPU_PP_SENSOR_MEM_TEMP:
+ ret = smu_v13_0_6_thermal_get_temperature(smu, sensor,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_MCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(
+ smu, SMU_UCLK, (uint32_t *)data);
+ /* the output clock frequency in 10K unit */
+ *(uint32_t *)data *= 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(
+ smu, SMU_GFXCLK, (uint32_t *)data);
+ *(uint32_t *)data *= 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VDDGFX:
+ ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
+ *size = 4;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_get_power_limit(struct smu_context *smu,
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ uint32_t power_limit = 0;
+ int ret;
+
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+ if (current_power_limit)
+ *current_power_limit = 0;
+ if (default_power_limit)
+ *default_power_limit = 0;
+ if (max_power_limit)
+ *max_power_limit = 0;
+
+ dev_warn(
+ smu->adev->dev,
+ "PPT feature is not enabled, power values can't be fetched.");
+
+ return 0;
+ }
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit, &power_limit);
+
+ if (ret) {
+ dev_err(smu->adev->dev, "Couldn't get PPT limit");
+ return -EINVAL;
+ }
+
+ if (current_power_limit)
+ *current_power_limit = power_limit;
+ if (default_power_limit)
+ *default_power_limit = power_limit;
+
+ if (max_power_limit)
+ *max_power_limit = pptable->MaxSocketPowerLimit;
+
+ return 0;
+}
+
+static int smu_v13_0_6_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
+{
+ return smu_v13_0_set_power_limit(smu, limit_type, limit);
+}
+
+static int smu_v13_0_6_system_features_control(struct smu_context *smu,
+ bool enable)
+{
+ int ret;
+
+ /* Nothing to be done for APU */
+ if (smu->adev->flags & AMD_IS_APU)
+ return 0;
+
+ ret = smu_v13_0_system_features_control(smu, enable);
+
+ return ret;
+}
+
+static int smu_v13_0_6_set_gfx_soft_freq_limited_range(struct smu_context *smu,
+ uint32_t min,
+ uint32_t max)
+{
+ int ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ max & 0xffff, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinGfxclk,
+ min & 0xffff, NULL);
+
+ return ret;
+}
+
+static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
+ struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct smu_13_0_dpm_table *gfx_table =
+ &dpm_context->dpm_tables.gfx_table;
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ int ret;
+
+ /* Disable determinism if switching to another mode */
+ if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
+ (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) {
+ smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
+ pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
+ }
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
+ return 0;
+
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ if ((gfx_table->min == pstate_table->gfxclk_pstate.curr.min) &&
+ (gfx_table->max == pstate_table->gfxclk_pstate.curr.max))
+ return 0;
+
+ ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(
+ smu, gfx_table->min, gfx_table->max);
+ if (ret)
+ return ret;
+
+ pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
+ pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
+ return 0;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ return 0;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max)
+{
+ struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
+ struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t min_clk;
+ uint32_t max_clk;
+ int ret = 0;
+
+ if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK)
+ return -EINVAL;
+
+ if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
+ (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
+ return -EINVAL;
+
+ if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ if (min >= max) {
+ dev_err(smu->adev->dev,
+ "Minimum GFX clk should be less than the maximum allowed clock\n");
+ return -EINVAL;
+ }
+
+ if ((min == pstate_table->gfxclk_pstate.curr.min) &&
+ (max == pstate_table->gfxclk_pstate.curr.max))
+ return 0;
+
+ ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min, max);
+ if (!ret) {
+ pstate_table->gfxclk_pstate.curr.min = min;
+ pstate_table->gfxclk_pstate.curr.max = max;
+ }
+
+ return ret;
+ }
+
+ if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+ if (!max || (max < dpm_context->dpm_tables.gfx_table.min) ||
+ (max > dpm_context->dpm_tables.gfx_table.max)) {
+ dev_warn(
+ adev->dev,
+ "Invalid max frequency %d MHz specified for determinism\n",
+ max);
+ return -EINVAL;
+ }
+
+ /* Restore default min/max clocks and enable determinism */
+ min_clk = dpm_context->dpm_tables.gfx_table.min;
+ max_clk = dpm_context->dpm_tables.gfx_table.max;
+ ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min_clk,
+ max_clk);
+ if (!ret) {
+ usleep_range(500, 1000);
+ ret = smu_cmn_send_smc_msg_with_param(
+ smu, SMU_MSG_EnableDeterminism, max, NULL);
+ if (ret) {
+ dev_err(adev->dev,
+ "Failed to enable determinism at GFX clock %d MHz\n",
+ max);
+ } else {
+ pstate_table->gfxclk_pstate.curr.min = min_clk;
+ pstate_table->gfxclk_pstate.curr.max = max;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size)
+{
+ struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
+ struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ uint32_t min_clk;
+ uint32_t max_clk;
+ int ret = 0;
+
+ /* Only allowed in manual or determinism mode */
+ if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
+ (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
+ return -EINVAL;
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (size != 2) {
+ dev_err(smu->adev->dev,
+ "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ if (input[0] == 0) {
+ if (input[1] < dpm_context->dpm_tables.gfx_table.min) {
+ dev_warn(
+ smu->adev->dev,
+ "Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n",
+ input[1],
+ dpm_context->dpm_tables.gfx_table.min);
+ pstate_table->gfxclk_pstate.custom.min =
+ pstate_table->gfxclk_pstate.curr.min;
+ return -EINVAL;
+ }
+
+ pstate_table->gfxclk_pstate.custom.min = input[1];
+ } else if (input[0] == 1) {
+ if (input[1] > dpm_context->dpm_tables.gfx_table.max) {
+ dev_warn(
+ smu->adev->dev,
+ "Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
+ input[1],
+ dpm_context->dpm_tables.gfx_table.max);
+ pstate_table->gfxclk_pstate.custom.max =
+ pstate_table->gfxclk_pstate.curr.max;
+ return -EINVAL;
+ }
+
+ pstate_table->gfxclk_pstate.custom.max = input[1];
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev,
+ "Input parameter number not correct\n");
+ return -EINVAL;
+ } else {
+ /* Use the default frequencies for manual and determinism mode */
+ min_clk = dpm_context->dpm_tables.gfx_table.min;
+ max_clk = dpm_context->dpm_tables.gfx_table.max;
+
+ return smu_v13_0_6_set_soft_freq_limited_range(
+ smu, SMU_GFXCLK, min_clk, max_clk);
+ }
+ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev,
+ "Input parameter number not correct\n");
+ return -EINVAL;
+ } else {
+ if (!pstate_table->gfxclk_pstate.custom.min)
+ pstate_table->gfxclk_pstate.custom.min =
+ pstate_table->gfxclk_pstate.curr.min;
+
+ if (!pstate_table->gfxclk_pstate.custom.max)
+ pstate_table->gfxclk_pstate.custom.max =
+ pstate_table->gfxclk_pstate.curr.max;
+
+ min_clk = pstate_table->gfxclk_pstate.custom.min;
+ max_clk = pstate_table->gfxclk_pstate.custom.max;
+
+ return smu_v13_0_6_set_soft_freq_limited_range(
+ smu, SMU_GFXCLK, min_clk, max_clk);
+ }
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu,
+ uint64_t *feature_mask)
+{
+ uint32_t smu_version;
+ int ret;
+
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ ret = smu_cmn_get_enabled_mask(smu, feature_mask);
+
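+ /* Firmware older than 0x552F00 cannot report the enabled feature mask; treat the failure as an empty mask */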
+ if (ret == -EIO && smu_version < 0x552F00) {
+ *feature_mask = 0;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static bool smu_v13_0_6_is_dpm_running(struct smu_context *smu)
+{
+ int ret;
+ uint64_t feature_enabled;
+
+ ret = smu_v13_0_6_get_enabled_mask(smu, &feature_enabled);
+
+ if (ret)
+ return false;
+
+ return !!(feature_enabled & SMC_DPM_FEATURE);
+}
+
+static int smu_v13_0_6_request_i2c_xfer(struct smu_context *smu,
+ void *table_data)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = &smu_table->driver_table;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t table_size;
+ int ret = 0;
+
+ if (!table_data)
+ return -EINVAL;
+
+ table_size = smu_table->tables[SMU_TABLE_I2C_COMMANDS].size;
+
+ memcpy(table->cpu_addr, table_data, table_size);
+ /* Flush hdp cache */
+ amdgpu_asic_flush_hdp(adev, NULL);
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RequestI2cTransaction,
+ NULL);
+
+ return ret;
+}
+
+static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msg, int num_msgs)
+{
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
+ struct amdgpu_device *adev = smu_i2c->adev;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = &smu_table->driver_table;
+ SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
+ int i, j, r, c;
+ u16 dir;
+
+ if (!adev->pm.dpm_enabled)
+ return -EBUSY;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->I2CcontrollerPort = smu_i2c->port;
+ req->I2CSpeed = I2C_SPEED_FAST_400K;
+ req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
+ dir = msg[0].flags & I2C_M_RD;
+
+ for (c = i = 0; i < num_msgs; i++) {
+ for (j = 0; j < msg[i].len; j++, c++) {
+ SwI2cCmd_t *cmd = &req->SwI2cCmds[c];
+
+ if (!(msg[i].flags & I2C_M_RD)) {
+ /* write */
+ cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK;
+ cmd->ReadWriteData = msg[i].buf[j];
+ }
+
+ if ((dir ^ msg[i].flags) & I2C_M_RD) {
+ /* The direction changes. */
+ dir = msg[i].flags & I2C_M_RD;
+ cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
+ }
+
+ req->NumCmds++;
+
+ /*
+ * Insert STOP if we are at the last byte of either last
+ * message for the transaction or the client explicitly
+ * requires a STOP at this particular message.
+ */
+ if ((j == msg[i].len - 1) &&
+ ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
+ cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
+ cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
+ }
+ }
+ }
+ mutex_lock(&adev->pm.mutex);
+ r = smu_v13_0_6_request_i2c_xfer(smu, req);
+ mutex_unlock(&adev->pm.mutex);
+ if (r)
+ goto fail;
+
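+ /* Copy data for read messages back from the response buffer shared with the SMU */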
+ for (c = i = 0; i < num_msgs; i++) {
+ if (!(msg[i].flags & I2C_M_RD)) {
+ c += msg[i].len;
+ continue;
+ }
+ for (j = 0; j < msg[i].len; j++, c++) {
+ SwI2cCmd_t *cmd = &res->SwI2cCmds[c];
+
+ msg[i].buf[j] = cmd->ReadWriteData;
+ }
+ }
+ r = num_msgs;
+fail:
+ kfree(req);
+ return r;
+}
+
+static u32 smu_v13_0_6_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm smu_v13_0_6_i2c_algo = {
+ .master_xfer = smu_v13_0_6_i2c_xfer,
+ .functionality = smu_v13_0_6_i2c_func,
+};
+
+static const struct i2c_adapter_quirks smu_v13_0_6_i2c_control_quirks = {
+ .flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
+ .max_read_len = MAX_SW_I2C_COMMANDS,
+ .max_write_len = MAX_SW_I2C_COMMANDS,
+ .max_comb_1st_msg_len = 2,
+ .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
+};
+
+static int smu_v13_0_6_i2c_control_init(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int res, i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ smu_i2c->adev = adev;
+ smu_i2c->port = i;
+ mutex_init(&smu_i2c->mutex);
+ control->owner = THIS_MODULE;
+ control->class = I2C_CLASS_SPD;
+ control->dev.parent = &adev->pdev->dev;
+ control->algo = &smu_v13_0_6_i2c_algo;
+ snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
+ control->quirks = &smu_v13_0_6_i2c_control_quirks;
+ i2c_set_adapdata(control, smu_i2c);
+
+ res = i2c_add_adapter(control);
+ if (res) {
+ DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+ goto Out_err;
+ }
+ }
+
+ adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+ adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+
+ return 0;
+Out_err:
+ for ( ; i >= 0; i--) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ i2c_del_adapter(control);
+ }
+ return res;
+}
+
+static void smu_v13_0_6_i2c_control_fini(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ i2c_del_adapter(control);
+ }
+ adev->pm.ras_eeprom_i2c_bus = NULL;
+ adev->pm.fru_eeprom_i2c_bus = NULL;
+}
+
+static void smu_v13_0_6_get_unique_id(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ //SmuMetrics_t *metrics = smu->smu_table.metrics_table;
+ uint32_t upper32 = 0, lower32 = 0;
+ int ret;
+
+ ret = smu_cmn_get_metrics_table(smu, NULL, false);
+ if (ret)
+ goto out;
+
+ //upper32 = metrics->PublicSerialNumUpper32;
+ //lower32 = metrics->PublicSerialNumLower32;
+
+out:
+ adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
+ if (adev->serial[0] == '\0')
+ sprintf(adev->serial, "%016llx", adev->unique_id);
+}
+
+static bool smu_v13_0_6_is_baco_supported(struct smu_context *smu)
+{
+ /* smu_13_0_6 does not support baco */
+
+ return false;
+}
+
+static int smu_v13_0_6_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl,
+ state, NULL);
+}
+
+static int smu_v13_0_6_allow_xgmi_power_down(struct smu_context *smu, bool en)
+{
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GmiPwrDnControl,
+ en ? 0 : 1, NULL);
+}
+
+static const struct throttling_logging_label {
+ uint32_t feature_mask;
+ const char *label;
+} logging_label[] = {
+ { (1U << THROTTLER_TEMP_HBM_BIT), "HBM" },
+ { (1U << THROTTLER_TEMP_SOC_BIT), "SOC" },
+ { (1U << THROTTLER_TEMP_VR_GFX_BIT), "VR limit" },
+};
+static void smu_v13_0_6_log_thermal_throttling_event(struct smu_context *smu)
+{
+ int ret;
+ int throttler_idx, throttling_events = 0, buf_idx = 0;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t throttler_status;
+ char log_buf[256];
+
+ ret = smu_v13_0_6_get_smu_metrics_data(smu, METRICS_THROTTLER_STATUS,
+ &throttler_status);
+ if (ret)
+ return;
+
+ memset(log_buf, 0, sizeof(log_buf));
+ for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label);
+ throttler_idx++) {
+ if (throttler_status &
+ logging_label[throttler_idx].feature_mask) {
+ throttling_events++;
+ buf_idx += snprintf(log_buf + buf_idx,
+ sizeof(log_buf) - buf_idx, "%s%s",
+ throttling_events > 1 ? " and " : "",
+ logging_label[throttler_idx].label);
+ if (buf_idx >= sizeof(log_buf)) {
+ dev_err(adev->dev, "buffer overflow!\n");
+ log_buf[sizeof(log_buf) - 1] = '\0';
+ break;
+ }
+ }
+ }
+
+ dev_warn(
+ adev->dev,
+ "WARN: GPU thermal throttling temperature reached, expect performance decrease. %s.\n",
+ log_buf);
+ kgd2kfd_smi_event_throttle(
+ smu->adev->kfd.dev,
+ smu_cmn_get_indep_throttler_status(throttler_status,
+ smu_v13_0_6_throttler_map));
+}
+
+static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t esm_ctrl;
+
+ /* TODO: confirm this on real target */
+ esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
+ if ((esm_ctrl >> 15) & 0x1FFFF)
+ return (((esm_ctrl >> 8) & 0x3F) + 128);
+
+ return smu_v13_0_get_current_pcie_link_speed(smu);
+}
+
+static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v1_3 *gpu_metrics =
+ (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
+ MetricsTable_t *metrics;
+ int i, ret = 0;
+
+ metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
+ if (!metrics)
+ return -ENOMEM;
+ ret = smu_v13_0_6_get_metrics_table(smu, metrics, true);
+ if (ret) {
+ kfree(metrics);
+ return ret;
+ }
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
+
+ /* TODO: Decide on how to fill in zero value fields */
+ gpu_metrics->temperature_edge = 0;
+ gpu_metrics->temperature_hotspot = 0;
+ gpu_metrics->temperature_mem = 0;
+ gpu_metrics->temperature_vrgfx = 0;
+ gpu_metrics->temperature_vrsoc = 0;
+ gpu_metrics->temperature_vrmem = 0;
+
+ gpu_metrics->average_gfx_activity = 0;
+ gpu_metrics->average_umc_activity = 0;
+ gpu_metrics->average_mm_activity = 0;
+
+ gpu_metrics->average_socket_power = 0;
+ gpu_metrics->energy_accumulator = 0;
+
+ gpu_metrics->average_gfxclk_frequency = 0;
+ gpu_metrics->average_socclk_frequency = 0;
+ gpu_metrics->average_uclk_frequency = 0;
+ gpu_metrics->average_vclk0_frequency = 0;
+ gpu_metrics->average_dclk0_frequency = 0;
+
+ gpu_metrics->current_gfxclk = 0;
+ gpu_metrics->current_socclk = 0;
+ gpu_metrics->current_uclk = 0;
+ gpu_metrics->current_vclk0 = 0;
+ gpu_metrics->current_dclk0 = 0;
+
+ gpu_metrics->throttle_status = 0;
+ gpu_metrics->indep_throttle_status = smu_cmn_get_indep_throttler_status(
+ gpu_metrics->throttle_status, smu_v13_0_6_throttler_map);
+
+ gpu_metrics->current_fan_speed = 0;
+
+ gpu_metrics->pcie_link_width = 0;
+ gpu_metrics->pcie_link_speed = smu_v13_0_6_get_current_pcie_link_speed(smu);
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ gpu_metrics->gfx_activity_acc = 0;
+ gpu_metrics->mem_activity_acc = 0;
+
+ for (i = 0; i < NUM_HBM_INSTANCES; i++)
+ gpu_metrics->temperature_hbm[i] = 0;
+
+ gpu_metrics->firmware_timestamp = 0;
+
+ *table = (void *)gpu_metrics;
+ kfree(metrics);
+
+ return sizeof(struct gpu_metrics_v1_3);
+}
+
+static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
+{
+ u32 smu_version;
+ int ret = 0, index;
+ struct amdgpu_device *adev = smu->adev;
+ int timeout = 10;
+
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+
+ index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+ SMU_MSG_GfxDeviceDriverReset);
+
+ mutex_lock(&smu->message_lock);
+ ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index,
+ SMU_RESET_MODE_2);
+ /* This is similar to FLR, wait till max FLR timeout */
+ msleep(100);
+ dev_dbg(smu->adev->dev, "restore config space...\n");
+ /* Restore the config space saved during init */
+ amdgpu_device_load_pci_state(adev->pdev);
+
+ dev_dbg(smu->adev->dev, "wait for reset ack\n");
+ do {
+ ret = smu_cmn_wait_for_response(smu);
+ /* Wait a bit more time for getting ACK */
+ if (ret == -ETIME) {
+ --timeout;
+ usleep_range(500, 1000);
+ continue;
+ }
+
+ if (ret != 1) {
+ dev_err(adev->dev,
+ "failed to send mode2 message \tparam: 0x%08x response %#x\n",
+ SMU_RESET_MODE_2, ret);
+ goto out;
+ }
+ } while (ret == -ETIME && timeout);
+
+ if (ret == 1)
+ ret = 0;
+out:
+ mutex_unlock(&smu->message_lock);
+
+ return ret;
+}
+
+static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_ras *ras;
+ u32 fatal_err, param;
+ int ret = 0;
+
+ ras = amdgpu_ras_get_context(adev);
+ fatal_err = 0;
+ param = SMU_RESET_MODE_1;
+
+ /* fatal error triggered by ras, PMFW supports the flag */
+ if (ras && atomic_read(&ras->in_recovery))
+ fatal_err = 1;
+
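+ /* The fatal-error flag is carried in the upper 16 bits of the reset message parameter */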
+ param |= (fatal_err << 16);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+ param, NULL);
+
+ if (!ret)
+ msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
+
+ return ret;
+}
+
+static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu)
+{
+ /* TODO: Enable this when FW support is added */
+ return false;
+}
+
+static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu)
+{
+ return true;
+}
+
+static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
+ uint32_t size)
+{
+ int ret = 0;
+
+ /* message SMU to update the bad page number on SMUBUS */
+ ret = smu_cmn_send_smc_msg_with_param(
+ smu, SMU_MSG_SetNumBadHbmPagesRetired, size, NULL);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "[%s] failed to message SMU to update HBM bad pages number\n",
+ __func__);
+
+ return ret;
+}
+
+static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
+ /* init dpm */
+ .get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
+ /* dpm/clk tables */
+ .set_default_dpm_table = smu_v13_0_6_set_default_dpm_table,
+ .populate_umd_state_clk = smu_v13_0_6_populate_umd_state_clk,
+ .print_clk_levels = smu_v13_0_6_print_clk_levels,
+ .force_clk_levels = smu_v13_0_6_force_clk_levels,
+ .read_sensor = smu_v13_0_6_read_sensor,
+ .set_performance_level = smu_v13_0_6_set_performance_level,
+ .get_power_limit = smu_v13_0_6_get_power_limit,
+ .is_dpm_running = smu_v13_0_6_is_dpm_running,
+ .get_unique_id = smu_v13_0_6_get_unique_id,
+ .init_smc_tables = smu_v13_0_6_init_smc_tables,
+ .fini_smc_tables = smu_v13_0_fini_smc_tables,
+ .init_power = smu_v13_0_init_power,
+ .fini_power = smu_v13_0_fini_power,
+ .check_fw_status = smu_v13_0_6_check_fw_status,
+ /* pptable related */
+ .check_fw_version = smu_v13_0_check_fw_version,
+ .set_driver_table_location = smu_v13_0_set_driver_table_location,
+ .set_tool_table_location = smu_v13_0_set_tool_table_location,
+ .notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
+ .system_features_control = smu_v13_0_6_system_features_control,
+ .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
+ .send_smc_msg = smu_cmn_send_smc_msg,
+ .get_enabled_mask = smu_v13_0_6_get_enabled_mask,
+ .feature_is_enabled = smu_cmn_feature_is_enabled,
+ .set_power_limit = smu_v13_0_6_set_power_limit,
+ .set_xgmi_pstate = smu_v13_0_set_xgmi_pstate,
+ /* TODO: Thermal limits unknown, skip these for now
+ .register_irq_handler = smu_v13_0_register_irq_handler,
+ .enable_thermal_alert = smu_v13_0_enable_thermal_alert,
+ .disable_thermal_alert = smu_v13_0_disable_thermal_alert,
+ */
+ .setup_pptable = smu_v13_0_6_setup_pptable,
+ .baco_is_support = smu_v13_0_6_is_baco_supported,
+ .get_dpm_ultimate_freq = smu_v13_0_6_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v13_0_6_set_soft_freq_limited_range,
+ .od_edit_dpm_table = smu_v13_0_6_usr_edit_dpm_table,
+ .set_df_cstate = smu_v13_0_6_set_df_cstate,
+ .allow_xgmi_power_down = smu_v13_0_6_allow_xgmi_power_down,
+ .log_thermal_throttling_event = smu_v13_0_6_log_thermal_throttling_event,
+ .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
+ .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
+ .get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
+ .mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
+ .mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported,
+ .mode1_reset = smu_v13_0_6_mode1_reset,
+ .mode2_reset = smu_v13_0_6_mode2_reset,
+ .wait_for_event = smu_v13_0_wait_for_event,
+ .i2c_init = smu_v13_0_6_i2c_control_init,
+ .i2c_fini = smu_v13_0_6_i2c_control_fini,
+ .send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
+};
+
+void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
+{
+ smu->ppt_funcs = &smu_v13_0_6_ppt_funcs;
+ smu->message_map = smu_v13_0_6_message_map;
+ smu->clock_map = smu_v13_0_6_clk_map;
+ smu->feature_map = smu_v13_0_6_feature_mask_map;
+ smu->table_map = smu_v13_0_6_table_map;
+ smu_v13_0_set_smu_mailbox_registers(smu);
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
new file mode 100644
index 000000000000..f0fa42a645c0
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU_13_0_6_PPT_H__
+#define __SMU_13_0_6_PPT_H__
+
+#define SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL 0x2
+#define SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL 0x4
+#define SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL 0x2
+
+extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 9e1967d8049e..1b2c82449f20 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -1479,7 +1479,9 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
i);
- if (workload_type < 0) {
+ if (workload_type == -ENOTSUPP)
+ continue;
+ else if (workload_type < 0) {
result = -EINVAL;
goto out;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index d5abafc5a682..3ecb900e6ecd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -478,13 +478,13 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
return mapping.map_to;
case CMN2ASIC_MAPPING_WORKLOAD:
- if (index > PP_SMC_POWER_PROFILE_WINDOW3D ||
+ if (index >= PP_SMC_POWER_PROFILE_COUNT ||
!smu->workload_map)
return -EINVAL;
mapping = smu->workload_map[index];
if (!mapping.valid_mapping)
- return -EINVAL;
+ return -ENOTSUPP;
return mapping.map_to;
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 12e8f30c65f7..f076a09afac0 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -220,6 +220,18 @@ config DRM_PARADE_PS8640
The PS8640 is a high-performance and low-power
MIPI DSI to eDP converter
+config DRM_SAMSUNG_DSIM
+ tristate "Samsung MIPI DSIM bridge driver"
+ depends on COMMON_CLK
+ depends on OF && HAS_IOMEM
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL_BRIDGE
+ help
+ The Samsung MIPI DSIM bridge controller driver.
+ This MIPI DSIM bridge can be found on Exynos SoCs and
+ NXP's i.MX8M Mini/Nano.
+
config DRM_SIL_SII8620
tristate "Silicon Image SII8620 HDMI/MHL bridge"
depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 52f6e8b4a821..2b892b7ed59e 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
obj-$(CONFIG_DRM_PARADE_PS8640) += parade-ps8640.o
+obj-$(CONFIG_DRM_SAMSUNG_DSIM) += samsung-dsim.o
obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
obj-$(CONFIG_DRM_SII902X) += sii902x.o
obj-$(CONFIG_DRM_SII9234) += sii9234.o
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 2019a8167d69..b40baced1331 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -676,8 +676,8 @@ static int lt8912_parse_dt(struct lt8912 *lt)
lt->hdmi_port = of_drm_find_bridge(port_node);
if (!lt->hdmi_port) {
- dev_err(lt->dev, "%s: Failed to get hdmi port\n", __func__);
- ret = -ENODEV;
+ ret = -EPROBE_DEFER;
+ dev_err_probe(lt->dev, ret, "%s: Failed to get hdmi port\n", __func__);
goto err_free_host_node;
}
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
new file mode 100644
index 000000000000..e0a402a85787
--- /dev/null
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -0,0 +1,1967 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Samsung MIPI DSIM bridge driver.
+ *
+ * Copyright (C) 2021 Amarula Solutions(India)
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ * Author: Jagan Teki <jagan@amarulasolutions.com>
+ *
+ * Based on exynos_drm_dsi from
+ * Tomasz Figa <t.figa@samsung.com>
+ */
+
+#include <asm/unaligned.h>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/media-bus-format.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/bridge/samsung-dsim.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+/* returns true iff both arguments logically differ */
+#define NEQV(a, b) (!(a) ^ !(b))
+
+/* DSIM_STATUS */
+#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0)
+#define DSIM_STOP_STATE_CLK BIT(8)
+#define DSIM_TX_READY_HS_CLK BIT(10)
+#define DSIM_PLL_STABLE BIT(31)
+
+/* DSIM_SWRST */
+#define DSIM_FUNCRST BIT(16)
+#define DSIM_SWRST BIT(0)
+
+/* DSIM_TIMEOUT */
+#define DSIM_LPDR_TIMEOUT(x) ((x) << 0)
+#define DSIM_BTA_TIMEOUT(x) ((x) << 16)
+
+/* DSIM_CLKCTRL */
+#define DSIM_ESC_PRESCALER(x) (((x) & 0xffff) << 0)
+#define DSIM_ESC_PRESCALER_MASK (0xffff << 0)
+#define DSIM_LANE_ESC_CLK_EN_CLK BIT(19)
+#define DSIM_LANE_ESC_CLK_EN_DATA(x) (((x) & 0xf) << 20)
+#define DSIM_LANE_ESC_CLK_EN_DATA_MASK (0xf << 20)
+#define DSIM_BYTE_CLKEN BIT(24)
+#define DSIM_BYTE_CLK_SRC(x) (((x) & 0x3) << 25)
+#define DSIM_BYTE_CLK_SRC_MASK (0x3 << 25)
+#define DSIM_PLL_BYPASS BIT(27)
+#define DSIM_ESC_CLKEN BIT(28)
+#define DSIM_TX_REQUEST_HSCLK BIT(31)
+
+/* DSIM_CONFIG */
+#define DSIM_LANE_EN_CLK BIT(0)
+#define DSIM_LANE_EN(x) (((x) & 0xf) << 1)
+#define DSIM_NUM_OF_DATA_LANE(x) (((x) & 0x3) << 5)
+#define DSIM_SUB_PIX_FORMAT(x) (((x) & 0x7) << 8)
+#define DSIM_MAIN_PIX_FORMAT_MASK (0x7 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB888 (0x7 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB666 (0x6 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB666_P (0x5 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB565 (0x4 << 12)
+#define DSIM_SUB_VC (((x) & 0x3) << 16)
+#define DSIM_MAIN_VC (((x) & 0x3) << 18)
+#define DSIM_HSA_DISABLE_MODE BIT(20)
+#define DSIM_HBP_DISABLE_MODE BIT(21)
+#define DSIM_HFP_DISABLE_MODE BIT(22)
+/*
+ * The i.MX 8M Mini Applications Processor Reference Manual,
+ * Rev. 3, 11/2020 Page 4091
+ * The i.MX 8M Nano Applications Processor Reference Manual,
+ * Rev. 2, 07/2022 Page 3058
+ * The i.MX 8M Plus Applications Processor Reference Manual,
+ * Rev. 1, 06/2021 Page 5436
+ * all claim this bit is 'HseDisableMode' with the definition
+ * 0 = Disables transfer
+ * 1 = Enables transfer
+ *
+ * That is, despite its name, this bit enables HSE transfer rather
+ * than disabling it.
+ *
+ * The name here follows the manual; the driver logic is based on the
+ * MIPI_DSI_MODE_VIDEO_HSE flag.
+ */
+#define DSIM_HSE_DISABLE_MODE BIT(23)
+#define DSIM_AUTO_MODE BIT(24)
+#define DSIM_VIDEO_MODE BIT(25)
+#define DSIM_BURST_MODE BIT(26)
+#define DSIM_SYNC_INFORM BIT(27)
+#define DSIM_EOT_DISABLE BIT(28)
+#define DSIM_MFLUSH_VS BIT(29)
+/* This flag is valid only for exynos3250/3472/5260/5430 */
+#define DSIM_CLKLANE_STOP BIT(30)
+
+/* DSIM_ESCMODE */
+#define DSIM_TX_TRIGGER_RST BIT(4)
+#define DSIM_TX_LPDT_LP BIT(6)
+#define DSIM_CMD_LPDT_LP BIT(7)
+#define DSIM_FORCE_BTA BIT(16)
+#define DSIM_FORCE_STOP_STATE BIT(20)
+#define DSIM_STOP_STATE_CNT(x) (((x) & 0x7ff) << 21)
+#define DSIM_STOP_STATE_CNT_MASK (0x7ff << 21)
+
+/* DSIM_MDRESOL */
+#define DSIM_MAIN_STAND_BY BIT(31)
+#define DSIM_MAIN_VRESOL(x, num_bits) (((x) & ((1 << (num_bits)) - 1)) << 16)
+#define DSIM_MAIN_HRESOL(x, num_bits) (((x) & ((1 << (num_bits)) - 1)) << 0)
+
+/* DSIM_MVPORCH */
+#define DSIM_CMD_ALLOW(x) ((x) << 28)
+#define DSIM_STABLE_VFP(x) ((x) << 16)
+#define DSIM_MAIN_VBP(x) ((x) << 0)
+#define DSIM_CMD_ALLOW_MASK (0xf << 28)
+#define DSIM_STABLE_VFP_MASK (0x7ff << 16)
+#define DSIM_MAIN_VBP_MASK (0x7ff << 0)
+
+/* DSIM_MHPORCH */
+#define DSIM_MAIN_HFP(x) ((x) << 16)
+#define DSIM_MAIN_HBP(x) ((x) << 0)
+#define DSIM_MAIN_HFP_MASK ((0xffff) << 16)
+#define DSIM_MAIN_HBP_MASK ((0xffff) << 0)
+
+/* DSIM_MSYNC */
+#define DSIM_MAIN_VSA(x) ((x) << 22)
+#define DSIM_MAIN_HSA(x) ((x) << 0)
+#define DSIM_MAIN_VSA_MASK ((0x3ff) << 22)
+#define DSIM_MAIN_HSA_MASK ((0xffff) << 0)
+
+/* DSIM_SDRESOL */
+#define DSIM_SUB_STANDY(x) ((x) << 31)
+#define DSIM_SUB_VRESOL(x) ((x) << 16)
+#define DSIM_SUB_HRESOL(x) ((x) << 0)
+#define DSIM_SUB_STANDY_MASK ((0x1) << 31)
+#define DSIM_SUB_VRESOL_MASK ((0x7ff) << 16)
+#define DSIM_SUB_HRESOL_MASK ((0x7ff) << 0)
+
+/* DSIM_INTSRC */
+#define DSIM_INT_PLL_STABLE BIT(31)
+#define DSIM_INT_SW_RST_RELEASE BIT(30)
+#define DSIM_INT_SFR_FIFO_EMPTY BIT(29)
+#define DSIM_INT_SFR_HDR_FIFO_EMPTY BIT(28)
+#define DSIM_INT_BTA BIT(25)
+#define DSIM_INT_FRAME_DONE BIT(24)
+#define DSIM_INT_RX_TIMEOUT BIT(21)
+#define DSIM_INT_BTA_TIMEOUT BIT(20)
+#define DSIM_INT_RX_DONE BIT(18)
+#define DSIM_INT_RX_TE BIT(17)
+#define DSIM_INT_RX_ACK BIT(16)
+#define DSIM_INT_RX_ECC_ERR BIT(15)
+#define DSIM_INT_RX_CRC_ERR BIT(14)
+
+/* DSIM_FIFOCTRL */
+#define DSIM_RX_DATA_FULL BIT(25)
+#define DSIM_RX_DATA_EMPTY BIT(24)
+#define DSIM_SFR_HEADER_FULL BIT(23)
+#define DSIM_SFR_HEADER_EMPTY BIT(22)
+#define DSIM_SFR_PAYLOAD_FULL BIT(21)
+#define DSIM_SFR_PAYLOAD_EMPTY BIT(20)
+#define DSIM_I80_HEADER_FULL BIT(19)
+#define DSIM_I80_HEADER_EMPTY BIT(18)
+#define DSIM_I80_PAYLOAD_FULL BIT(17)
+#define DSIM_I80_PAYLOAD_EMPTY BIT(16)
+#define DSIM_SD_HEADER_FULL BIT(15)
+#define DSIM_SD_HEADER_EMPTY BIT(14)
+#define DSIM_SD_PAYLOAD_FULL BIT(13)
+#define DSIM_SD_PAYLOAD_EMPTY BIT(12)
+#define DSIM_MD_HEADER_FULL BIT(11)
+#define DSIM_MD_HEADER_EMPTY BIT(10)
+#define DSIM_MD_PAYLOAD_FULL BIT(9)
+#define DSIM_MD_PAYLOAD_EMPTY BIT(8)
+#define DSIM_RX_FIFO BIT(4)
+#define DSIM_SFR_FIFO BIT(3)
+#define DSIM_I80_FIFO BIT(2)
+#define DSIM_SD_FIFO BIT(1)
+#define DSIM_MD_FIFO BIT(0)
+
+/* DSIM_PHYACCHR */
+#define DSIM_AFC_EN BIT(14)
+#define DSIM_AFC_CTL(x) (((x) & 0x7) << 5)
+
+/* DSIM_PLLCTRL */
+#define DSIM_FREQ_BAND(x) ((x) << 24)
+#define DSIM_PLL_EN BIT(23)
+#define DSIM_PLL_P(x, offset) ((x) << (offset))
+#define DSIM_PLL_M(x) ((x) << 4)
+#define DSIM_PLL_S(x) ((x) << 1)
+
+/* DSIM_PHYCTRL */
+#define DSIM_PHYCTRL_ULPS_EXIT(x) (((x) & 0x1ff) << 0)
+#define DSIM_PHYCTRL_B_DPHYCTL_VREG_LP BIT(30)
+#define DSIM_PHYCTRL_B_DPHYCTL_SLEW_UP BIT(14)
+
+/* DSIM_PHYTIMING */
+#define DSIM_PHYTIMING_LPX(x) ((x) << 8)
+#define DSIM_PHYTIMING_HS_EXIT(x) ((x) << 0)
+
+/* DSIM_PHYTIMING1 */
+#define DSIM_PHYTIMING1_CLK_PREPARE(x) ((x) << 24)
+#define DSIM_PHYTIMING1_CLK_ZERO(x) ((x) << 16)
+#define DSIM_PHYTIMING1_CLK_POST(x) ((x) << 8)
+#define DSIM_PHYTIMING1_CLK_TRAIL(x) ((x) << 0)
+
+/* DSIM_PHYTIMING2 */
+#define DSIM_PHYTIMING2_HS_PREPARE(x) ((x) << 16)
+#define DSIM_PHYTIMING2_HS_ZERO(x) ((x) << 8)
+#define DSIM_PHYTIMING2_HS_TRAIL(x) ((x) << 0)
+
+#define DSI_MAX_BUS_WIDTH 4
+#define DSI_NUM_VIRTUAL_CHANNELS 4
+#define DSI_TX_FIFO_SIZE 2048
+#define DSI_RX_FIFO_SIZE 256
+#define DSI_XFER_TIMEOUT_MS 100
+#define DSI_RX_FIFO_EMPTY 0x30800002
+
+#define OLD_SCLK_MIPI_CLK_NAME "pll_clk"
+
+static const char *const clk_names[5] = {
+ "bus_clk",
+ "sclk_mipi",
+ "phyclk_mipidphy0_bitclkdiv8",
+ "phyclk_mipidphy0_rxclkesc0",
+ "sclk_rgb_vclk_to_dsim0"
+};
+
+enum samsung_dsim_transfer_type {
+ EXYNOS_DSI_TX,
+ EXYNOS_DSI_RX,
+};
+
+enum reg_idx {
+ DSIM_STATUS_REG, /* Status register */
+ DSIM_SWRST_REG, /* Software reset register */
+ DSIM_CLKCTRL_REG, /* Clock control register */
+ DSIM_TIMEOUT_REG, /* Time out register */
+ DSIM_CONFIG_REG, /* Configuration register */
+ DSIM_ESCMODE_REG, /* Escape mode register */
+ DSIM_MDRESOL_REG,
+ DSIM_MVPORCH_REG, /* Main display Vporch register */
+ DSIM_MHPORCH_REG, /* Main display Hporch register */
+ DSIM_MSYNC_REG, /* Main display sync area register */
+ DSIM_INTSRC_REG, /* Interrupt source register */
+ DSIM_INTMSK_REG, /* Interrupt mask register */
+ DSIM_PKTHDR_REG, /* Packet Header FIFO register */
+ DSIM_PAYLOAD_REG, /* Payload FIFO register */
+ DSIM_RXFIFO_REG, /* Read FIFO register */
+ DSIM_FIFOCTRL_REG, /* FIFO status and control register */
+ DSIM_PLLCTRL_REG, /* PLL control register */
+ DSIM_PHYCTRL_REG,
+ DSIM_PHYTIMING_REG,
+ DSIM_PHYTIMING1_REG,
+ DSIM_PHYTIMING2_REG,
+ NUM_REGS
+};
+
+static const unsigned int exynos_reg_ofs[] = {
+ [DSIM_STATUS_REG] = 0x00,
+ [DSIM_SWRST_REG] = 0x04,
+ [DSIM_CLKCTRL_REG] = 0x08,
+ [DSIM_TIMEOUT_REG] = 0x0c,
+ [DSIM_CONFIG_REG] = 0x10,
+ [DSIM_ESCMODE_REG] = 0x14,
+ [DSIM_MDRESOL_REG] = 0x18,
+ [DSIM_MVPORCH_REG] = 0x1c,
+ [DSIM_MHPORCH_REG] = 0x20,
+ [DSIM_MSYNC_REG] = 0x24,
+ [DSIM_INTSRC_REG] = 0x2c,
+ [DSIM_INTMSK_REG] = 0x30,
+ [DSIM_PKTHDR_REG] = 0x34,
+ [DSIM_PAYLOAD_REG] = 0x38,
+ [DSIM_RXFIFO_REG] = 0x3c,
+ [DSIM_FIFOCTRL_REG] = 0x44,
+ [DSIM_PLLCTRL_REG] = 0x4c,
+ [DSIM_PHYCTRL_REG] = 0x5c,
+ [DSIM_PHYTIMING_REG] = 0x64,
+ [DSIM_PHYTIMING1_REG] = 0x68,
+ [DSIM_PHYTIMING2_REG] = 0x6c,
+};
+
+static const unsigned int exynos5433_reg_ofs[] = {
+ [DSIM_STATUS_REG] = 0x04,
+ [DSIM_SWRST_REG] = 0x0C,
+ [DSIM_CLKCTRL_REG] = 0x10,
+ [DSIM_TIMEOUT_REG] = 0x14,
+ [DSIM_CONFIG_REG] = 0x18,
+ [DSIM_ESCMODE_REG] = 0x1C,
+ [DSIM_MDRESOL_REG] = 0x20,
+ [DSIM_MVPORCH_REG] = 0x24,
+ [DSIM_MHPORCH_REG] = 0x28,
+ [DSIM_MSYNC_REG] = 0x2C,
+ [DSIM_INTSRC_REG] = 0x34,
+ [DSIM_INTMSK_REG] = 0x38,
+ [DSIM_PKTHDR_REG] = 0x3C,
+ [DSIM_PAYLOAD_REG] = 0x40,
+ [DSIM_RXFIFO_REG] = 0x44,
+ [DSIM_FIFOCTRL_REG] = 0x4C,
+ [DSIM_PLLCTRL_REG] = 0x94,
+ [DSIM_PHYCTRL_REG] = 0xA4,
+ [DSIM_PHYTIMING_REG] = 0xB4,
+ [DSIM_PHYTIMING1_REG] = 0xB8,
+ [DSIM_PHYTIMING2_REG] = 0xBC,
+};
+
+enum reg_value_idx {
+ RESET_TYPE,
+ PLL_TIMER,
+ STOP_STATE_CNT,
+ PHYCTRL_ULPS_EXIT,
+ PHYCTRL_VREG_LP,
+ PHYCTRL_SLEW_UP,
+ PHYTIMING_LPX,
+ PHYTIMING_HS_EXIT,
+ PHYTIMING_CLK_PREPARE,
+ PHYTIMING_CLK_ZERO,
+ PHYTIMING_CLK_POST,
+ PHYTIMING_CLK_TRAIL,
+ PHYTIMING_HS_PREPARE,
+ PHYTIMING_HS_ZERO,
+ PHYTIMING_HS_TRAIL
+};
+
+static const unsigned int reg_values[] = {
+ [RESET_TYPE] = DSIM_SWRST,
+ [PLL_TIMER] = 500,
+ [STOP_STATE_CNT] = 0xf,
+ [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x0af),
+ [PHYCTRL_VREG_LP] = 0,
+ [PHYCTRL_SLEW_UP] = 0,
+ [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x06),
+ [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0b),
+ [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x07),
+ [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x27),
+ [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0d),
+ [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x08),
+ [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x09),
+ [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x0d),
+ [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0b),
+};
+
+static const unsigned int exynos5422_reg_values[] = {
+ [RESET_TYPE] = DSIM_SWRST,
+ [PLL_TIMER] = 500,
+ [STOP_STATE_CNT] = 0xf,
+ [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0xaf),
+ [PHYCTRL_VREG_LP] = 0,
+ [PHYCTRL_SLEW_UP] = 0,
+ [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x08),
+ [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0d),
+ [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x09),
+ [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x30),
+ [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0e),
+ [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x0a),
+ [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x0c),
+ [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x11),
+ [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0d),
+};
+
+static const unsigned int exynos5433_reg_values[] = {
+ [RESET_TYPE] = DSIM_FUNCRST,
+ [PLL_TIMER] = 22200,
+ [STOP_STATE_CNT] = 0xa,
+ [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x190),
+ [PHYCTRL_VREG_LP] = DSIM_PHYCTRL_B_DPHYCTL_VREG_LP,
+ [PHYCTRL_SLEW_UP] = DSIM_PHYCTRL_B_DPHYCTL_SLEW_UP,
+ [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x07),
+ [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0c),
+ [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x09),
+ [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x2d),
+ [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0e),
+ [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x09),
+ [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x0b),
+ [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x10),
+ [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0c),
+};
+
+static const unsigned int imx8mm_dsim_reg_values[] = {
+ [RESET_TYPE] = DSIM_SWRST,
+ [PLL_TIMER] = 500,
+ [STOP_STATE_CNT] = 0xf,
+ [PHYCTRL_ULPS_EXIT] = 0,
+ [PHYCTRL_VREG_LP] = 0,
+ [PHYCTRL_SLEW_UP] = 0,
+ [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x06),
+ [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0b),
+ [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x07),
+ [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x26),
+ [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0d),
+ [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x08),
+ [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x08),
+ [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x0d),
+ [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0b),
+};
+
+static const struct samsung_dsim_driver_data exynos3_dsi_driver_data = {
+ .reg_ofs = exynos_reg_ofs,
+ .plltmr_reg = 0x50,
+ .has_freqband = 1,
+ .has_clklane_stop = 1,
+ .num_clks = 2,
+ .max_freq = 1000,
+ .wait_for_reset = 1,
+ .num_bits_resol = 11,
+ .pll_p_offset = 13,
+ .reg_values = reg_values,
+};
+
+static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = {
+ .reg_ofs = exynos_reg_ofs,
+ .plltmr_reg = 0x50,
+ .has_freqband = 1,
+ .has_clklane_stop = 1,
+ .num_clks = 2,
+ .max_freq = 1000,
+ .wait_for_reset = 1,
+ .num_bits_resol = 11,
+ .pll_p_offset = 13,
+ .reg_values = reg_values,
+};
+
+static const struct samsung_dsim_driver_data exynos5_dsi_driver_data = {
+ .reg_ofs = exynos_reg_ofs,
+ .plltmr_reg = 0x58,
+ .num_clks = 2,
+ .max_freq = 1000,
+ .wait_for_reset = 1,
+ .num_bits_resol = 11,
+ .pll_p_offset = 13,
+ .reg_values = reg_values,
+};
+
+static const struct samsung_dsim_driver_data exynos5433_dsi_driver_data = {
+ .reg_ofs = exynos5433_reg_ofs,
+ .plltmr_reg = 0xa0,
+ .has_clklane_stop = 1,
+ .num_clks = 5,
+ .max_freq = 1500,
+ .wait_for_reset = 0,
+ .num_bits_resol = 12,
+ .pll_p_offset = 13,
+ .reg_values = exynos5433_reg_values,
+};
+
+static const struct samsung_dsim_driver_data exynos5422_dsi_driver_data = {
+ .reg_ofs = exynos5433_reg_ofs,
+ .plltmr_reg = 0xa0,
+ .has_clklane_stop = 1,
+ .num_clks = 2,
+ .max_freq = 1500,
+ .wait_for_reset = 1,
+ .num_bits_resol = 12,
+ .pll_p_offset = 13,
+ .reg_values = exynos5422_reg_values,
+};
+
+static const struct samsung_dsim_driver_data imx8mm_dsi_driver_data = {
+ .reg_ofs = exynos5433_reg_ofs,
+ .plltmr_reg = 0xa0,
+ .has_clklane_stop = 1,
+ .num_clks = 2,
+ .max_freq = 2100,
+ .wait_for_reset = 0,
+ .num_bits_resol = 12,
+ /*
+ * Unlike Exynos, PLL_P(PMS_P) offset 14 is used in i.MX8M Mini/Nano/Plus
+ * downstream driver - drivers/gpu/drm/bridge/sec-dsim.c
+ */
+ .pll_p_offset = 14,
+ .reg_values = imx8mm_dsim_reg_values,
+};
+
+static const struct samsung_dsim_driver_data *
+samsung_dsim_types[DSIM_TYPE_COUNT] = {
+ [DSIM_TYPE_EXYNOS3250] = &exynos3_dsi_driver_data,
+ [DSIM_TYPE_EXYNOS4210] = &exynos4_dsi_driver_data,
+ [DSIM_TYPE_EXYNOS5410] = &exynos5_dsi_driver_data,
+ [DSIM_TYPE_EXYNOS5422] = &exynos5422_dsi_driver_data,
+ [DSIM_TYPE_EXYNOS5433] = &exynos5433_dsi_driver_data,
+ [DSIM_TYPE_IMX8MM] = &imx8mm_dsi_driver_data,
+ [DSIM_TYPE_IMX8MP] = &imx8mm_dsi_driver_data,
+};
+
+static inline struct samsung_dsim *host_to_dsi(struct mipi_dsi_host *h)
+{
+ return container_of(h, struct samsung_dsim, dsi_host);
+}
+
+static inline struct samsung_dsim *bridge_to_dsi(struct drm_bridge *b)
+{
+ return container_of(b, struct samsung_dsim, bridge);
+}
+
+static inline void samsung_dsim_write(struct samsung_dsim *dsi,
+ enum reg_idx idx, u32 val)
+{
+ writel(val, dsi->reg_base + dsi->driver_data->reg_ofs[idx]);
+}
+
+static inline u32 samsung_dsim_read(struct samsung_dsim *dsi, enum reg_idx idx)
+{
+ return readl(dsi->reg_base + dsi->driver_data->reg_ofs[idx]);
+}
+
+static void samsung_dsim_wait_for_reset(struct samsung_dsim *dsi)
+{
+ if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300)))
+ return;
+
+ dev_err(dsi->dev, "timeout waiting for reset\n");
+}
+
+static void samsung_dsim_reset(struct samsung_dsim *dsi)
+{
+ u32 reset_val = dsi->driver_data->reg_values[RESET_TYPE];
+
+ reinit_completion(&dsi->completed);
+ samsung_dsim_write(dsi, DSIM_SWRST_REG, reset_val);
+}
+
+#ifndef MHZ
+#define MHZ (1000 * 1000)
+#endif
+
+static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi,
+ unsigned long fin,
+ unsigned long fout,
+ u8 *p, u16 *m, u8 *s)
+{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+ unsigned long best_freq = 0;
+ u32 min_delta = 0xffffffff;
+ u8 p_min, p_max;
+ u8 _p, best_p;
+ u16 _m, best_m;
+ u8 _s, best_s;
+
+ p_min = DIV_ROUND_UP(fin, (12 * MHZ));
+ p_max = fin / (6 * MHZ);
+
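+ /*
+ * The PLL output frequency is fout = fin * M / (P << S). Exhaustively
+ * search P/M/S for the combination closest to the requested rate while
+ * keeping the frequency before the S post-divider, fin * M / P, within
+ * the supported range (500 MHz up to driver_data->max_freq MHz).
+ */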
+ for (_p = p_min; _p <= p_max; ++_p) {
+ for (_s = 0; _s <= 5; ++_s) {
+ u64 tmp;
+ u32 delta;
+
+ tmp = (u64)fout * (_p << _s);
+ do_div(tmp, fin);
+ _m = tmp;
+ if (_m < 41 || _m > 125)
+ continue;
+
+ tmp = (u64)_m * fin;
+ do_div(tmp, _p);
+ if (tmp < 500 * MHZ ||
+ tmp > driver_data->max_freq * MHZ)
+ continue;
+
+ tmp = (u64)_m * fin;
+ do_div(tmp, _p << _s);
+
+ delta = abs(fout - tmp);
+ if (delta < min_delta) {
+ best_p = _p;
+ best_m = _m;
+ best_s = _s;
+ min_delta = delta;
+ best_freq = tmp;
+ }
+ }
+ }
+
+ if (best_freq) {
+ *p = best_p;
+ *m = best_m;
+ *s = best_s;
+ }
+
+ return best_freq;
+}
+
+static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
+ unsigned long freq)
+{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+ unsigned long fin, fout;
+ int timeout;
+ u8 p, s;
+ u16 m;
+ u32 reg;
+
+ fin = dsi->pll_clk_rate;
+ fout = samsung_dsim_pll_find_pms(dsi, fin, freq, &p, &m, &s);
+ if (!fout) {
+ dev_err(dsi->dev,
+ "failed to find PLL PMS for requested frequency\n");
+ return 0;
+ }
+ dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s);
+
+ writel(driver_data->reg_values[PLL_TIMER],
+ dsi->reg_base + driver_data->plltmr_reg);
+
+ reg = DSIM_PLL_EN | DSIM_PLL_P(p, driver_data->pll_p_offset) |
+ DSIM_PLL_M(m) | DSIM_PLL_S(s);
+
+ if (driver_data->has_freqband) {
+ static const unsigned long freq_bands[] = {
+ 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
+ 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
+ 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
+ 770 * MHZ, 870 * MHZ, 950 * MHZ,
+ };
+ int band;
+
+ for (band = 0; band < ARRAY_SIZE(freq_bands); ++band)
+ if (fout < freq_bands[band])
+ break;
+
+ dev_dbg(dsi->dev, "band %d\n", band);
+
+ reg |= DSIM_FREQ_BAND(band);
+ }
+
+ samsung_dsim_write(dsi, DSIM_PLLCTRL_REG, reg);
+
+ timeout = 1000;
+ do {
+ if (timeout-- == 0) {
+ dev_err(dsi->dev, "PLL failed to stabilize\n");
+ return 0;
+ }
+ reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
+ } while ((reg & DSIM_PLL_STABLE) == 0);
+
+ return fout;
+}
+
+static int samsung_dsim_enable_clock(struct samsung_dsim *dsi)
+{
+ unsigned long hs_clk, byte_clk, esc_clk;
+ unsigned long esc_div;
+ u32 reg;
+
+ hs_clk = samsung_dsim_set_pll(dsi, dsi->burst_clk_rate);
+ if (!hs_clk) {
+ dev_err(dsi->dev, "failed to configure DSI PLL\n");
+ return -EFAULT;
+ }
+
+ byte_clk = hs_clk / 8;
+ esc_div = DIV_ROUND_UP(byte_clk, dsi->esc_clk_rate);
+ esc_clk = byte_clk / esc_div;
+
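+ /*
+ * The escape clock is derived from the byte clock through an integer
+ * prescaler; if the result still exceeds 20 MHz, bump the divider so
+ * the escape clock stays at or below that limit.
+ */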
+ if (esc_clk > 20 * MHZ) {
+ ++esc_div;
+ esc_clk = byte_clk / esc_div;
+ }
+
+ dev_dbg(dsi->dev, "hs_clk = %lu, byte_clk = %lu, esc_clk = %lu\n",
+ hs_clk, byte_clk, esc_clk);
+
+ reg = samsung_dsim_read(dsi, DSIM_CLKCTRL_REG);
+ reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK
+ | DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS
+ | DSIM_BYTE_CLK_SRC_MASK);
+ reg |= DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN
+ | DSIM_ESC_PRESCALER(esc_div)
+ | DSIM_LANE_ESC_CLK_EN_CLK
+ | DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1)
+ | DSIM_BYTE_CLK_SRC(0)
+ | DSIM_TX_REQUEST_HSCLK;
+ samsung_dsim_write(dsi, DSIM_CLKCTRL_REG, reg);
+
+ return 0;
+}
+
+static void samsung_dsim_set_phy_ctrl(struct samsung_dsim *dsi)
+{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+ const unsigned int *reg_values = driver_data->reg_values;
+ u32 reg;
+
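+ /*
+ * Variants that use the PLL frequency-band field (has_freqband) skip
+ * the explicit D-PHY control/timing programming below.
+ */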
+ if (driver_data->has_freqband)
+ return;
+
+ /* B D-PHY: D-PHY Master & Slave Analog Block control */
+ reg = reg_values[PHYCTRL_ULPS_EXIT] | reg_values[PHYCTRL_VREG_LP] |
+ reg_values[PHYCTRL_SLEW_UP];
+ samsung_dsim_write(dsi, DSIM_PHYCTRL_REG, reg);
+
+ /*
+ * T LPX: Transmitted length of any Low-Power state period
+ * T HS-EXIT: Time that the transmitter drives LP-11 following a HS
+ * burst
+ */
+ reg = reg_values[PHYTIMING_LPX] | reg_values[PHYTIMING_HS_EXIT];
+ samsung_dsim_write(dsi, DSIM_PHYTIMING_REG, reg);
+
+ /*
+ * T CLK-PREPARE: Time that the transmitter drives the Clock Lane LP-00
+ * Line state immediately before the HS-0 Line state starting the
+ * HS transmission
+ * T CLK-ZERO: Time that the transmitter drives the HS-0 state prior to
+ * transmitting the Clock.
+ * T CLK_POST: Time that the transmitter continues to send HS clock
+ * after the last associated Data Lane has transitioned to LP Mode
+ * Interval is defined as the period from the end of T HS-TRAIL to
+ * the beginning of T CLK-TRAIL
+ * T CLK-TRAIL: Time that the transmitter drives the HS-0 state after
+ * the last payload clock bit of a HS transmission burst
+ */
+ reg = reg_values[PHYTIMING_CLK_PREPARE] |
+ reg_values[PHYTIMING_CLK_ZERO] |
+ reg_values[PHYTIMING_CLK_POST] |
+ reg_values[PHYTIMING_CLK_TRAIL];
+
+ samsung_dsim_write(dsi, DSIM_PHYTIMING1_REG, reg);
+
+ /*
+ * T HS-PREPARE: Time that the transmitter drives the Data Lane LP-00
+ * Line state immediately before the HS-0 Line state starting the
+ * HS transmission
+ * T HS-ZERO: Time that the transmitter drives the HS-0 state prior to
+ * transmitting the Sync sequence.
+ * T HS-TRAIL: Time that the transmitter drives the flipped differential
+ * state after last payload data bit of a HS transmission burst
+ */
+ reg = reg_values[PHYTIMING_HS_PREPARE] | reg_values[PHYTIMING_HS_ZERO] |
+ reg_values[PHYTIMING_HS_TRAIL];
+ samsung_dsim_write(dsi, DSIM_PHYTIMING2_REG, reg);
+}
+
+static void samsung_dsim_disable_clock(struct samsung_dsim *dsi)
+{
+ u32 reg;
+
+ reg = samsung_dsim_read(dsi, DSIM_CLKCTRL_REG);
+ reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK
+ | DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN);
+ samsung_dsim_write(dsi, DSIM_CLKCTRL_REG, reg);
+
+ reg = samsung_dsim_read(dsi, DSIM_PLLCTRL_REG);
+ reg &= ~DSIM_PLL_EN;
+ samsung_dsim_write(dsi, DSIM_PLLCTRL_REG, reg);
+}
+
+static void samsung_dsim_enable_lane(struct samsung_dsim *dsi, u32 lane)
+{
+ u32 reg = samsung_dsim_read(dsi, DSIM_CONFIG_REG);
+
+ reg |= (DSIM_NUM_OF_DATA_LANE(dsi->lanes - 1) | DSIM_LANE_EN_CLK |
+ DSIM_LANE_EN(lane));
+ samsung_dsim_write(dsi, DSIM_CONFIG_REG, reg);
+}
+
+static int samsung_dsim_init_link(struct samsung_dsim *dsi)
+{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+ int timeout;
+ u32 reg;
+ u32 lanes_mask;
+
+ /* Initialize FIFO pointers */
+ reg = samsung_dsim_read(dsi, DSIM_FIFOCTRL_REG);
+ reg &= ~0x1f;
+ samsung_dsim_write(dsi, DSIM_FIFOCTRL_REG, reg);
+
+ usleep_range(9000, 11000);
+
+ reg |= 0x1f;
+ samsung_dsim_write(dsi, DSIM_FIFOCTRL_REG, reg);
+ usleep_range(9000, 11000);
+
+ /* DSI configuration */
+ reg = 0;
+
+ /*
+ * The first bit of mode_flags specifies the display configuration.
+ * If this bit is set (MIPI_DSI_MODE_VIDEO), the host operates in video
+ * mode, otherwise it operates in command mode.
+ */
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ reg |= DSIM_VIDEO_MODE;
+
+ /*
+ * The user manual states that the following bits are ignored in
+ * command mode.
+ */
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH))
+ reg |= DSIM_MFLUSH_VS;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ reg |= DSIM_SYNC_INFORM;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ reg |= DSIM_BURST_MODE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_AUTO_VERT)
+ reg |= DSIM_AUTO_MODE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSE)
+ reg |= DSIM_HSE_DISABLE_MODE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HFP)
+ reg |= DSIM_HFP_DISABLE_MODE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HBP)
+ reg |= DSIM_HBP_DISABLE_MODE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HSA)
+ reg |= DSIM_HSA_DISABLE_MODE;
+ }
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ reg |= DSIM_EOT_DISABLE;
+
+ switch (dsi->format) {
+ case MIPI_DSI_FMT_RGB888:
+ reg |= DSIM_MAIN_PIX_FORMAT_RGB888;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ reg |= DSIM_MAIN_PIX_FORMAT_RGB666;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ reg |= DSIM_MAIN_PIX_FORMAT_RGB666_P;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ reg |= DSIM_MAIN_PIX_FORMAT_RGB565;
+ break;
+ default:
+ dev_err(dsi->dev, "invalid pixel format\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Use non-continuous clock mode if the peripheral requests it and
+ * the host controller supports it.
+ *
+ * In non-continuous clock mode, the host controller turns off
+ * the HS clock between high-speed transmissions to reduce
+ * power consumption.
+ */
+ if (driver_data->has_clklane_stop &&
+ dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
+ reg |= DSIM_CLKLANE_STOP;
+ samsung_dsim_write(dsi, DSIM_CONFIG_REG, reg);
+
+ lanes_mask = BIT(dsi->lanes) - 1;
+ samsung_dsim_enable_lane(dsi, lanes_mask);
+
+ /* Check that the clock and data lanes are in stop state */
+ timeout = 100;
+ do {
+ if (timeout-- == 0) {
+ dev_err(dsi->dev, "waiting for bus lanes timed out\n");
+ return -EFAULT;
+ }
+
+ reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
+ if ((reg & DSIM_STOP_STATE_DAT(lanes_mask))
+ != DSIM_STOP_STATE_DAT(lanes_mask))
+ continue;
+ } while (!(reg & (DSIM_STOP_STATE_CLK | DSIM_TX_READY_HS_CLK)));
+
+ reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
+ reg &= ~DSIM_STOP_STATE_CNT_MASK;
+ reg |= DSIM_STOP_STATE_CNT(driver_data->reg_values[STOP_STATE_CNT]);
+ samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
+
+ reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff);
+ samsung_dsim_write(dsi, DSIM_TIMEOUT_REG, reg);
+
+ return 0;
+}
+
+static void samsung_dsim_set_display_mode(struct samsung_dsim *dsi)
+{
+ struct drm_display_mode *m = &dsi->mode;
+ unsigned int num_bits_resol = dsi->driver_data->num_bits_resol;
+ u32 reg;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
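+ /*
+ * Derive the porch values from the DRM mode: the front porch is
+ * sync_start - display, the back porch is total - sync_end, and the
+ * sync width is sync_end - sync_start, on both axes.
+ */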
+ reg = DSIM_CMD_ALLOW(0xf)
+ | DSIM_STABLE_VFP(m->vsync_start - m->vdisplay)
+ | DSIM_MAIN_VBP(m->vtotal - m->vsync_end);
+ samsung_dsim_write(dsi, DSIM_MVPORCH_REG, reg);
+
+ reg = DSIM_MAIN_HFP(m->hsync_start - m->hdisplay)
+ | DSIM_MAIN_HBP(m->htotal - m->hsync_end);
+ samsung_dsim_write(dsi, DSIM_MHPORCH_REG, reg);
+
+ reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start)
+ | DSIM_MAIN_HSA(m->hsync_end - m->hsync_start);
+ samsung_dsim_write(dsi, DSIM_MSYNC_REG, reg);
+ }
+ reg = DSIM_MAIN_HRESOL(m->hdisplay, num_bits_resol) |
+ DSIM_MAIN_VRESOL(m->vdisplay, num_bits_resol);
+
+ samsung_dsim_write(dsi, DSIM_MDRESOL_REG, reg);
+
+ dev_dbg(dsi->dev, "LCD size = %dx%d\n", m->hdisplay, m->vdisplay);
+}
+
+static void samsung_dsim_set_display_enable(struct samsung_dsim *dsi, bool enable)
+{
+ u32 reg;
+
+ reg = samsung_dsim_read(dsi, DSIM_MDRESOL_REG);
+ if (enable)
+ reg |= DSIM_MAIN_STAND_BY;
+ else
+ reg &= ~DSIM_MAIN_STAND_BY;
+ samsung_dsim_write(dsi, DSIM_MDRESOL_REG, reg);
+}
+
+static int samsung_dsim_wait_for_hdr_fifo(struct samsung_dsim *dsi)
+{
+ int timeout = 2000;
+
+ do {
+ u32 reg = samsung_dsim_read(dsi, DSIM_FIFOCTRL_REG);
+
+ if (!(reg & DSIM_SFR_HEADER_FULL))
+ return 0;
+
+ if (!cond_resched())
+ usleep_range(950, 1050);
+ } while (--timeout);
+
+ return -ETIMEDOUT;
+}
+
+static void samsung_dsim_set_cmd_lpm(struct samsung_dsim *dsi, bool lpm)
+{
+ u32 v = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
+
+ if (lpm)
+ v |= DSIM_CMD_LPDT_LP;
+ else
+ v &= ~DSIM_CMD_LPDT_LP;
+
+ samsung_dsim_write(dsi, DSIM_ESCMODE_REG, v);
+}
+
+static void samsung_dsim_force_bta(struct samsung_dsim *dsi)
+{
+ u32 v = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
+
+ v |= DSIM_FORCE_BTA;
+ samsung_dsim_write(dsi, DSIM_ESCMODE_REG, v);
+}
+
+static void samsung_dsim_send_to_fifo(struct samsung_dsim *dsi,
+ struct samsung_dsim_transfer *xfer)
+{
+ struct device *dev = dsi->dev;
+ struct mipi_dsi_packet *pkt = &xfer->packet;
+ const u8 *payload = pkt->payload + xfer->tx_done;
+ u16 length = pkt->payload_length - xfer->tx_done;
+ bool first = !xfer->tx_done;
+ u32 reg;
+
+ dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n",
+ xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done);
+
+ if (length > DSI_TX_FIFO_SIZE)
+ length = DSI_TX_FIFO_SIZE;
+
+ xfer->tx_done += length;
+
+ /* Send payload */
+ while (length >= 4) {
+ reg = get_unaligned_le32(payload);
+ samsung_dsim_write(dsi, DSIM_PAYLOAD_REG, reg);
+ payload += 4;
+ length -= 4;
+ }
+
+ reg = 0;
+ switch (length) {
+ case 3:
+ reg |= payload[2] << 16;
+ fallthrough;
+ case 2:
+ reg |= payload[1] << 8;
+ fallthrough;
+ case 1:
+ reg |= payload[0];
+ samsung_dsim_write(dsi, DSIM_PAYLOAD_REG, reg);
+ break;
+ }
+
+ /* Send packet header */
+ if (!first)
+ return;
+
+ reg = get_unaligned_le32(pkt->header);
+ if (samsung_dsim_wait_for_hdr_fifo(dsi)) {
+ dev_err(dev, "waiting for header FIFO timed out\n");
+ return;
+ }
+
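+ /* Switch the command LP/HS mode only if it differs from the current state */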
+ if (NEQV(xfer->flags & MIPI_DSI_MSG_USE_LPM,
+ dsi->state & DSIM_STATE_CMD_LPM)) {
+ samsung_dsim_set_cmd_lpm(dsi, xfer->flags & MIPI_DSI_MSG_USE_LPM);
+ dsi->state ^= DSIM_STATE_CMD_LPM;
+ }
+
+ samsung_dsim_write(dsi, DSIM_PKTHDR_REG, reg);
+
+ if (xfer->flags & MIPI_DSI_MSG_REQ_ACK)
+ samsung_dsim_force_bta(dsi);
+}
+
+static void samsung_dsim_read_from_fifo(struct samsung_dsim *dsi,
+ struct samsung_dsim_transfer *xfer)
+{
+ u8 *payload = xfer->rx_payload + xfer->rx_done;
+ bool first = !xfer->rx_done;
+ struct device *dev = dsi->dev;
+ u16 length;
+ u32 reg;
+
+ if (first) {
+ reg = samsung_dsim_read(dsi, DSIM_RXFIFO_REG);
+
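+ /*
+ * The first FIFO word is the RX packet header: short read responses
+ * carry their payload in the header itself, while long responses
+ * encode the payload length there.
+ */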
+ switch (reg & 0x3f) {
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+ if (xfer->rx_len >= 2) {
+ payload[1] = reg >> 16;
+ ++xfer->rx_done;
+ }
+ fallthrough;
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+ payload[0] = reg >> 8;
+ ++xfer->rx_done;
+ xfer->rx_len = xfer->rx_done;
+ xfer->result = 0;
+ goto clear_fifo;
+ case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+ dev_err(dev, "DSI Error Report: 0x%04x\n", (reg >> 8) & 0xffff);
+ xfer->result = 0;
+ goto clear_fifo;
+ }
+
+ length = (reg >> 8) & 0xffff;
+ if (length > xfer->rx_len) {
+ dev_err(dev,
+ "response too long (%u > %u bytes), stripping\n",
+ length, xfer->rx_len);
+ length = xfer->rx_len;
+ } else if (length < xfer->rx_len) {
+ xfer->rx_len = length;
+ }
+ }
+
+ length = xfer->rx_len - xfer->rx_done;
+ xfer->rx_done += length;
+
+ /* Receive payload */
+ while (length >= 4) {
+ reg = samsung_dsim_read(dsi, DSIM_RXFIFO_REG);
+ payload[0] = (reg >> 0) & 0xff;
+ payload[1] = (reg >> 8) & 0xff;
+ payload[2] = (reg >> 16) & 0xff;
+ payload[3] = (reg >> 24) & 0xff;
+ payload += 4;
+ length -= 4;
+ }
+
+ if (length) {
+ reg = samsung_dsim_read(dsi, DSIM_RXFIFO_REG);
+ switch (length) {
+ case 3:
+ payload[2] = (reg >> 16) & 0xff;
+ fallthrough;
+ case 2:
+ payload[1] = (reg >> 8) & 0xff;
+ fallthrough;
+ case 1:
+ payload[0] = reg & 0xff;
+ }
+ }
+
+ if (xfer->rx_done == xfer->rx_len)
+ xfer->result = 0;
+
+clear_fifo:
+ length = DSI_RX_FIFO_SIZE / 4;
+ do {
+ reg = samsung_dsim_read(dsi, DSIM_RXFIFO_REG);
+ if (reg == DSI_RX_FIFO_EMPTY)
+ break;
+ } while (--length);
+}
+
+static void samsung_dsim_transfer_start(struct samsung_dsim *dsi)
+{
+ unsigned long flags;
+ struct samsung_dsim_transfer *xfer;
+ bool start = false;
+
+again:
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ if (list_empty(&dsi->transfer_list)) {
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+ return;
+ }
+
+ xfer = list_first_entry(&dsi->transfer_list,
+ struct samsung_dsim_transfer, list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ if (xfer->packet.payload_length &&
+ xfer->tx_done == xfer->packet.payload_length)
+ /* waiting for RX */
+ return;
+
+ samsung_dsim_send_to_fifo(dsi, xfer);
+
+ if (xfer->packet.payload_length || xfer->rx_len)
+ return;
+
+ xfer->result = 0;
+ complete(&xfer->completed);
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ list_del_init(&xfer->list);
+ start = !list_empty(&dsi->transfer_list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ if (start)
+ goto again;
+}
+
+static bool samsung_dsim_transfer_finish(struct samsung_dsim *dsi)
+{
+ struct samsung_dsim_transfer *xfer;
+ unsigned long flags;
+ bool start = true;
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ if (list_empty(&dsi->transfer_list)) {
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+ return false;
+ }
+
+ xfer = list_first_entry(&dsi->transfer_list,
+ struct samsung_dsim_transfer, list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ dev_dbg(dsi->dev,
+ "> xfer %pK, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
+ xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len,
+ xfer->rx_done);
+
+ if (xfer->tx_done != xfer->packet.payload_length)
+ return true;
+
+ if (xfer->rx_done != xfer->rx_len)
+ samsung_dsim_read_from_fifo(dsi, xfer);
+
+ if (xfer->rx_done != xfer->rx_len)
+ return true;
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ list_del_init(&xfer->list);
+ start = !list_empty(&dsi->transfer_list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ if (!xfer->rx_len)
+ xfer->result = 0;
+ complete(&xfer->completed);
+
+ return start;
+}
+
+static void samsung_dsim_remove_transfer(struct samsung_dsim *dsi,
+ struct samsung_dsim_transfer *xfer)
+{
+ unsigned long flags;
+ bool start;
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ if (!list_empty(&dsi->transfer_list) &&
+ xfer == list_first_entry(&dsi->transfer_list,
+ struct samsung_dsim_transfer, list)) {
+ list_del_init(&xfer->list);
+ start = !list_empty(&dsi->transfer_list);
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+ if (start)
+ samsung_dsim_transfer_start(dsi);
+ return;
+ }
+
+ list_del_init(&xfer->list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+}
+
+static int samsung_dsim_transfer(struct samsung_dsim *dsi,
+ struct samsung_dsim_transfer *xfer)
+{
+ unsigned long flags;
+ bool stopped;
+
+ xfer->tx_done = 0;
+ xfer->rx_done = 0;
+ xfer->result = -ETIMEDOUT;
+ init_completion(&xfer->completed);
+
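+ /*
+ * Queue the transfer; if the list was empty, start the first FIFO
+ * write here, otherwise the interrupt handler advances the queue as
+ * the preceding transfers complete.
+ */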
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ stopped = list_empty(&dsi->transfer_list);
+ list_add_tail(&xfer->list, &dsi->transfer_list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ if (stopped)
+ samsung_dsim_transfer_start(dsi);
+
+ wait_for_completion_timeout(&xfer->completed,
+ msecs_to_jiffies(DSI_XFER_TIMEOUT_MS));
+ if (xfer->result == -ETIMEDOUT) {
+ struct mipi_dsi_packet *pkt = &xfer->packet;
+
+ samsung_dsim_remove_transfer(dsi, xfer);
+ dev_err(dsi->dev, "xfer timed out: %*ph %*ph\n", 4, pkt->header,
+ (int)pkt->payload_length, pkt->payload);
+ return -ETIMEDOUT;
+ }
+
+ /* Also covers hardware timeout condition */
+ return xfer->result;
+}
+
+static irqreturn_t samsung_dsim_irq(int irq, void *dev_id)
+{
+ struct samsung_dsim *dsi = dev_id;
+ u32 status;
+
+ status = samsung_dsim_read(dsi, DSIM_INTSRC_REG);
+ if (!status) {
+ static unsigned long j;
+
+ if (printk_timed_ratelimit(&j, 500))
+ dev_warn(dsi->dev, "spurious interrupt\n");
+ return IRQ_HANDLED;
+ }
+ samsung_dsim_write(dsi, DSIM_INTSRC_REG, status);
+
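+ /*
+ * A software-reset-release interrupt means the controller came out of
+ * reset: unmask the operational interrupts and wake up the reset
+ * waiter.
+ */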
+ if (status & DSIM_INT_SW_RST_RELEASE) {
+ unsigned long mask = ~(DSIM_INT_RX_DONE |
+ DSIM_INT_SFR_FIFO_EMPTY |
+ DSIM_INT_SFR_HDR_FIFO_EMPTY |
+ DSIM_INT_RX_ECC_ERR |
+ DSIM_INT_SW_RST_RELEASE);
+ samsung_dsim_write(dsi, DSIM_INTMSK_REG, mask);
+ complete(&dsi->completed);
+ return IRQ_HANDLED;
+ }
+
+ if (!(status & (DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY |
+ DSIM_INT_PLL_STABLE)))
+ return IRQ_HANDLED;
+
+ if (samsung_dsim_transfer_finish(dsi))
+ samsung_dsim_transfer_start(dsi);
+
+ return IRQ_HANDLED;
+}
+
+static void samsung_dsim_enable_irq(struct samsung_dsim *dsi)
+{
+ enable_irq(dsi->irq);
+
+ if (dsi->te_gpio)
+ enable_irq(gpiod_to_irq(dsi->te_gpio));
+}
+
+static void samsung_dsim_disable_irq(struct samsung_dsim *dsi)
+{
+ if (dsi->te_gpio)
+ disable_irq(gpiod_to_irq(dsi->te_gpio));
+
+ disable_irq(dsi->irq);
+}
+
+static int samsung_dsim_init(struct samsung_dsim *dsi)
+{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+
+ if (dsi->state & DSIM_STATE_INITIALIZED)
+ return 0;
+
+ samsung_dsim_reset(dsi);
+ samsung_dsim_enable_irq(dsi);
+
+ if (driver_data->reg_values[RESET_TYPE] == DSIM_FUNCRST)
+ samsung_dsim_enable_lane(dsi, BIT(dsi->lanes) - 1);
+
+ samsung_dsim_enable_clock(dsi);
+ if (driver_data->wait_for_reset)
+ samsung_dsim_wait_for_reset(dsi);
+ samsung_dsim_set_phy_ctrl(dsi);
+ samsung_dsim_init_link(dsi);
+
+ dsi->state |= DSIM_STATE_INITIALIZED;
+
+ return 0;
+}
+
+static void samsung_dsim_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+ int ret;
+
+ if (dsi->state & DSIM_STATE_ENABLED)
+ return;
+
+ ret = pm_runtime_resume_and_get(dsi->dev);
+ if (ret < 0) {
+ dev_err(dsi->dev, "failed to enable DSI device.\n");
+ return;
+ }
+
+ dsi->state |= DSIM_STATE_ENABLED;
+
+ /*
+ * For Exynos-DSIM, the downstream bridge or panel expects the host
+ * initialization to happen during the DSI transfer.
+ */
+ if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) {
+ ret = samsung_dsim_init(dsi);
+ if (ret)
+ return;
+ }
+}
+
+static void samsung_dsim_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+
+ samsung_dsim_set_display_mode(dsi);
+ samsung_dsim_set_display_enable(dsi, true);
+
+ dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
+}
+
+static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+
+ if (!(dsi->state & DSIM_STATE_ENABLED))
+ return;
+
+ dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
+}
+
+static void samsung_dsim_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+
+ samsung_dsim_set_display_enable(dsi, false);
+
+ dsi->state &= ~DSIM_STATE_ENABLED;
+ pm_runtime_put_sync(dsi->dev);
+}
+
+/*
+ * This list of pixel output formats is taken from
+ * AN13573 i.MX 8/RT MIPI DSI/CSI-2, Rev. 0, 21 March 2022,
+ * 3.7.4 Pixel formats, Table 14. DSI pixel packing formats
+ */
+static const u32 samsung_dsim_pixel_output_fmts[] = {
+ MEDIA_BUS_FMT_YUYV10_1X20,
+ MEDIA_BUS_FMT_YUYV12_1X24,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_RGB101010_1X30,
+ MEDIA_BUS_FMT_RGB121212_1X36,
+ MEDIA_BUS_FMT_RGB565_1X16,
+ MEDIA_BUS_FMT_RGB666_1X18,
+ MEDIA_BUS_FMT_RGB888_1X24,
+};
+
+static bool samsung_dsim_pixel_output_fmt_supported(u32 fmt)
+{
+ int i;
+
+ if (fmt == MEDIA_BUS_FMT_FIXED)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(samsung_dsim_pixel_output_fmts); i++) {
+ if (samsung_dsim_pixel_output_fmts[i] == fmt)
+ return true;
+ }
+
+ return false;
+}
+
+static u32 *
+samsung_dsim_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts;
+
+ input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ if (!samsung_dsim_pixel_output_fmt_supported(output_fmt))
+ /*
+ * Some bridge/display drivers are still not able to pass the
+ * correct format, so handle those pipelines by falling back
+ * to the default format until the supported formats are finalized.
+ */
+ output_fmt = MEDIA_BUS_FMT_RGB888_1X24;
+
+ input_fmts[0] = output_fmt;
+ *num_input_fmts = 1;
+
+ return input_fmts;
+}
+
+static int samsung_dsim_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+
+ /*
+ * The i.MX8M Mini/Nano glue logic between LCDIF and DSIM
+ * inverts the HS/VS/DE sync signal polarity; therefore, while
+ * i.MX 8M Mini Applications Processor Reference Manual Rev. 3, 11/2020
+ * 13.6.3.5.2 RGB interface
+ * i.MX 8M Nano Applications Processor Reference Manual Rev. 2, 07/2022
+ * 13.6.2.7.2 RGB interface
+ * both claim "Vsync, Hsync, and VDEN are active high signals.", the
+ * LCDIF must generate inverted HS/VS/DE signals, i.e. active LOW.
+ *
+ * The i.MX8M Plus glue logic between LCDIFv3 and DSIM does not
+ * implement the same behavior, therefore LCDIFv3 must generate
+ * HS/VS/DE signals active HIGH.
+ */
+ if (dsi->plat_data->hw_type == DSIM_TYPE_IMX8MM) {
+ adjusted_mode->flags |= (DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
+ adjusted_mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ } else if (dsi->plat_data->hw_type == DSIM_TYPE_IMX8MP) {
+ adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
+ adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ }
+
+ return 0;
+}
+
+static void samsung_dsim_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+
+ drm_mode_copy(&dsi->mode, adjusted_mode);
+}
+
+static int samsung_dsim_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+
+ return drm_bridge_attach(bridge->encoder, dsi->out_bridge, bridge,
+ flags);
+}
+
+static const struct drm_bridge_funcs samsung_dsim_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_get_input_bus_fmts = samsung_dsim_atomic_get_input_bus_fmts,
+ .atomic_check = samsung_dsim_atomic_check,
+ .atomic_pre_enable = samsung_dsim_atomic_pre_enable,
+ .atomic_enable = samsung_dsim_atomic_enable,
+ .atomic_disable = samsung_dsim_atomic_disable,
+ .atomic_post_disable = samsung_dsim_atomic_post_disable,
+ .mode_set = samsung_dsim_mode_set,
+ .attach = samsung_dsim_attach,
+};
+
+static irqreturn_t samsung_dsim_te_irq_handler(int irq, void *dev_id)
+{
+ struct samsung_dsim *dsi = (struct samsung_dsim *)dev_id;
+ const struct samsung_dsim_plat_data *pdata = dsi->plat_data;
+
+ if (pdata->host_ops && pdata->host_ops->te_irq_handler)
+ return pdata->host_ops->te_irq_handler(dsi);
+
+ return IRQ_HANDLED;
+}
+
+static int samsung_dsim_register_te_irq(struct samsung_dsim *dsi, struct device *dev)
+{
+ int te_gpio_irq;
+ int ret;
+
+ dsi->te_gpio = devm_gpiod_get_optional(dev, "te", GPIOD_IN);
+ if (!dsi->te_gpio)
+ return 0;
+ else if (IS_ERR(dsi->te_gpio))
+ return dev_err_probe(dev, PTR_ERR(dsi->te_gpio), "failed to get te GPIO\n");
+
+ te_gpio_irq = gpiod_to_irq(dsi->te_gpio);
+
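+ /*
+ * Request the TE interrupt disabled (IRQF_NO_AUTOEN); it is enabled
+ * together with the main DSIM interrupt in samsung_dsim_enable_irq().
+ */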
+ ret = request_threaded_irq(te_gpio_irq, samsung_dsim_te_irq_handler, NULL,
+ IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN, "TE", dsi);
+ if (ret) {
+ dev_err(dsi->dev, "request interrupt failed with %d\n", ret);
+ gpiod_put(dsi->te_gpio);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int samsung_dsim_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct samsung_dsim *dsi = host_to_dsi(host);
+ const struct samsung_dsim_plat_data *pdata = dsi->plat_data;
+ struct device *dev = dsi->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *remote;
+ struct drm_panel *panel;
+ int ret;
+
+ /*
+ * Devices can also be child nodes when we also control that device
+ * through the upstream device (i.e. MIPI-DCS for a MIPI-DSI device).
+ *
+ * Look up a child node of the given parent that is neither "port"
+ * nor "ports".
+ */
+ for_each_available_child_of_node(np, remote) {
+ if (of_node_name_eq(remote, "port") ||
+ of_node_name_eq(remote, "ports"))
+ continue;
+
+ goto of_find_panel_or_bridge;
+ }
+
+ /*
+ * of_graph_get_remote_node() produces a noisy error message if the
+ * port node isn't found, and the absence of the port is a legitimate
+ * case here, so first silently check whether a graph is present in
+ * the device-tree node.
+ */
+ if (!of_graph_is_present(np))
+ return -ENODEV;
+
+ remote = of_graph_get_remote_node(np, 1, 0);
+
+of_find_panel_or_bridge:
+ if (!remote)
+ return -ENODEV;
+
+ panel = of_drm_find_panel(remote);
+ if (!IS_ERR(panel)) {
+ dsi->out_bridge = devm_drm_panel_bridge_add(dev, panel);
+ } else {
+ dsi->out_bridge = of_drm_find_bridge(remote);
+ if (!dsi->out_bridge)
+ dsi->out_bridge = ERR_PTR(-EINVAL);
+ }
+
+ of_node_put(remote);
+
+ if (IS_ERR(dsi->out_bridge)) {
+ ret = PTR_ERR(dsi->out_bridge);
+ DRM_DEV_ERROR(dev, "failed to find the bridge: %d\n", ret);
+ return ret;
+ }
+
+ DRM_DEV_INFO(dev, "Attached %s device\n", device->name);
+
+ drm_bridge_add(&dsi->bridge);
+
+ /*
+ * This is a temporary solution and should be handled in a more
+ * generic way.
+ *
+ * If the attached panel device operates in command mode, the DSI host
+ * should register a TE interrupt handler.
+ */
+ if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+ ret = samsung_dsim_register_te_irq(dsi, &device->dev);
+ if (ret)
+ return ret;
+ }
+
+ if (pdata->host_ops && pdata->host_ops->attach) {
+ ret = pdata->host_ops->attach(dsi, device);
+ if (ret)
+ return ret;
+ }
+
+ dsi->lanes = device->lanes;
+ dsi->format = device->format;
+ dsi->mode_flags = device->mode_flags;
+
+ return 0;
+}
+
+static void samsung_dsim_unregister_te_irq(struct samsung_dsim *dsi)
+{
+ if (dsi->te_gpio) {
+ free_irq(gpiod_to_irq(dsi->te_gpio), dsi);
+ gpiod_put(dsi->te_gpio);
+ }
+}
+
+static int samsung_dsim_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct samsung_dsim *dsi = host_to_dsi(host);
+ const struct samsung_dsim_plat_data *pdata = dsi->plat_data;
+
+ dsi->out_bridge = NULL;
+
+ if (pdata->host_ops && pdata->host_ops->detach)
+ pdata->host_ops->detach(dsi, device);
+
+ samsung_dsim_unregister_te_irq(dsi);
+
+ drm_bridge_remove(&dsi->bridge);
+
+ return 0;
+}
+
+static ssize_t samsung_dsim_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct samsung_dsim *dsi = host_to_dsi(host);
+ struct samsung_dsim_transfer xfer;
+ int ret;
+
+ if (!(dsi->state & DSIM_STATE_ENABLED))
+ return -EINVAL;
+
+ ret = samsung_dsim_init(dsi);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_create_packet(&xfer.packet, msg);
+ if (ret < 0)
+ return ret;
+
+ xfer.rx_len = msg->rx_len;
+ xfer.rx_payload = msg->rx_buf;
+ xfer.flags = msg->flags;
+
+ ret = samsung_dsim_transfer(dsi, &xfer);
+ return (ret < 0) ? ret : xfer.rx_done;
+}
+
+static const struct mipi_dsi_host_ops samsung_dsim_ops = {
+ .attach = samsung_dsim_host_attach,
+ .detach = samsung_dsim_host_detach,
+ .transfer = samsung_dsim_host_transfer,
+};
+
+static int samsung_dsim_of_read_u32(const struct device_node *np,
+ const char *propname, u32 *out_value)
+{
+ int ret = of_property_read_u32(np, propname, out_value);
+
+ if (ret < 0)
+ pr_err("%pOF: failed to get '%s' property\n", np, propname);
+
+ return ret;
+}
+
+static int samsung_dsim_parse_dt(struct samsung_dsim *dsi)
+{
+ struct device *dev = dsi->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+
+ ret = samsung_dsim_of_read_u32(node, "samsung,pll-clock-frequency",
+ &dsi->pll_clk_rate);
+ if (ret < 0)
+ return ret;
+
+ ret = samsung_dsim_of_read_u32(node, "samsung,burst-clock-frequency",
+ &dsi->burst_clk_rate);
+ if (ret < 0)
+ return ret;
+
+ ret = samsung_dsim_of_read_u32(node, "samsung,esc-clock-frequency",
+ &dsi->esc_clk_rate);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int generic_dsim_register_host(struct samsung_dsim *dsi)
+{
+ return mipi_dsi_host_register(&dsi->dsi_host);
+}
+
+static void generic_dsim_unregister_host(struct samsung_dsim *dsi)
+{
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+}
+
+static const struct samsung_dsim_host_ops generic_dsim_host_ops = {
+ .register_host = generic_dsim_register_host,
+ .unregister_host = generic_dsim_unregister_host,
+};
+
+static const struct drm_bridge_timings samsung_dsim_bridge_timings_de_high = {
+ .input_bus_flags = DRM_BUS_FLAG_DE_HIGH,
+};
+
+static const struct drm_bridge_timings samsung_dsim_bridge_timings_de_low = {
+ .input_bus_flags = DRM_BUS_FLAG_DE_LOW,
+};
+
+int samsung_dsim_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct samsung_dsim *dsi;
+ int ret, i;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ init_completion(&dsi->completed);
+ spin_lock_init(&dsi->transfer_lock);
+ INIT_LIST_HEAD(&dsi->transfer_list);
+
+ dsi->dsi_host.ops = &samsung_dsim_ops;
+ dsi->dsi_host.dev = dev;
+
+ dsi->dev = dev;
+ dsi->plat_data = of_device_get_match_data(dev);
+ dsi->driver_data = samsung_dsim_types[dsi->plat_data->hw_type];
+
+ dsi->supplies[0].supply = "vddcore";
+ dsi->supplies[1].supply = "vddio";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
+ dsi->supplies);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
+ dsi->clks = devm_kcalloc(dev, dsi->driver_data->num_clks,
+ sizeof(*dsi->clks), GFP_KERNEL);
+ if (!dsi->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < dsi->driver_data->num_clks; i++) {
+ dsi->clks[i] = devm_clk_get(dev, clk_names[i]);
+ if (IS_ERR(dsi->clks[i])) {
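+ /* Fall back to the legacy "pll_clk" name used by older device trees */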
+ if (strcmp(clk_names[i], "sclk_mipi") == 0) {
+ dsi->clks[i] = devm_clk_get(dev, OLD_SCLK_MIPI_CLK_NAME);
+ if (!IS_ERR(dsi->clks[i]))
+ continue;
+ }
+
+ dev_info(dev, "failed to get the clock: %s\n", clk_names[i]);
+ return PTR_ERR(dsi->clks[i]);
+ }
+ }
+
+ dsi->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dsi->reg_base))
+ return PTR_ERR(dsi->reg_base);
+
+ dsi->phy = devm_phy_optional_get(dev, "dsim");
+ if (IS_ERR(dsi->phy)) {
+ dev_info(dev, "failed to get dsim phy\n");
+ return PTR_ERR(dsi->phy);
+ }
+
+ dsi->irq = platform_get_irq(pdev, 0);
+ if (dsi->irq < 0)
+ return dsi->irq;
+
+ ret = devm_request_threaded_irq(dev, dsi->irq, NULL,
+ samsung_dsim_irq,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ dev_name(dev), dsi);
+ if (ret) {
+ dev_err(dev, "failed to request dsi irq\n");
+ return ret;
+ }
+
+ ret = samsung_dsim_parse_dt(dsi);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, dsi);
+
+ pm_runtime_enable(dev);
+
+ dsi->bridge.funcs = &samsung_dsim_bridge_funcs;
+ dsi->bridge.of_node = dev->of_node;
+ dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
+
+ /* DE_LOW: i.MX8M Mini/Nano LCDIF-DSIM glue logic inverts HS/VS/DE */
+ if (dsi->plat_data->hw_type == DSIM_TYPE_IMX8MM)
+ dsi->bridge.timings = &samsung_dsim_bridge_timings_de_low;
+ else
+ dsi->bridge.timings = &samsung_dsim_bridge_timings_de_high;
+
+ if (dsi->plat_data->host_ops && dsi->plat_data->host_ops->register_host)
+ ret = dsi->plat_data->host_ops->register_host(dsi);
+
+ if (ret)
+ goto err_disable_runtime;
+
+ return 0;
+
+err_disable_runtime:
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(samsung_dsim_probe);
+
+int samsung_dsim_remove(struct platform_device *pdev)
+{
+ struct samsung_dsim *dsi = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ if (dsi->plat_data->host_ops && dsi->plat_data->host_ops->unregister_host)
+ dsi->plat_data->host_ops->unregister_host(dsi);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(samsung_dsim_remove);
+
+static int __maybe_unused samsung_dsim_suspend(struct device *dev)
+{
+ struct samsung_dsim *dsi = dev_get_drvdata(dev);
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+ int ret, i;
+
+ usleep_range(10000, 20000);
+
+ if (dsi->state & DSIM_STATE_INITIALIZED) {
+ dsi->state &= ~DSIM_STATE_INITIALIZED;
+
+ samsung_dsim_disable_clock(dsi);
+
+ samsung_dsim_disable_irq(dsi);
+ }
+
+ dsi->state &= ~DSIM_STATE_CMD_LPM;
+
+ phy_power_off(dsi->phy);
+
+ for (i = driver_data->num_clks - 1; i > -1; i--)
+ clk_disable_unprepare(dsi->clks[i]);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+ if (ret < 0)
+ dev_err(dsi->dev, "cannot disable regulators %d\n", ret);
+
+ return 0;
+}
+
+static int __maybe_unused samsung_dsim_resume(struct device *dev)
+{
+ struct samsung_dsim *dsi = dev_get_drvdata(dev);
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+ int ret, i;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+ if (ret < 0) {
+ dev_err(dsi->dev, "cannot enable regulators %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < driver_data->num_clks; i++) {
+ ret = clk_prepare_enable(dsi->clks[i]);
+ if (ret < 0)
+ goto err_clk;
+ }
+
+ ret = phy_power_on(dsi->phy);
+ if (ret < 0) {
+ dev_err(dsi->dev, "cannot enable phy %d\n", ret);
+ goto err_clk;
+ }
+
+ return 0;
+
+err_clk:
+ while (--i > -1)
+ clk_disable_unprepare(dsi->clks[i]);
+ regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+
+ return ret;
+}
+
+const struct dev_pm_ops samsung_dsim_pm_ops = {
+ SET_RUNTIME_PM_OPS(samsung_dsim_suspend, samsung_dsim_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+EXPORT_SYMBOL_GPL(samsung_dsim_pm_ops);
+
+static const struct samsung_dsim_plat_data samsung_dsim_imx8mm_pdata = {
+ .hw_type = DSIM_TYPE_IMX8MM,
+ .host_ops = &generic_dsim_host_ops,
+};
+
+static const struct samsung_dsim_plat_data samsung_dsim_imx8mp_pdata = {
+ .hw_type = DSIM_TYPE_IMX8MP,
+ .host_ops = &generic_dsim_host_ops,
+};
+
+static const struct of_device_id samsung_dsim_of_match[] = {
+ {
+ .compatible = "fsl,imx8mm-mipi-dsim",
+ .data = &samsung_dsim_imx8mm_pdata,
+ },
+ {
+ .compatible = "fsl,imx8mp-mipi-dsim",
+ .data = &samsung_dsim_imx8mp_pdata,
+ },
+ { /* sentinel. */ }
+};
+MODULE_DEVICE_TABLE(of, samsung_dsim_of_match);
+
+static struct platform_driver samsung_dsim_driver = {
+ .probe = samsung_dsim_probe,
+ .remove = samsung_dsim_remove,
+ .driver = {
+ .name = "samsung-dsim",
+ .owner = THIS_MODULE,
+ .pm = &samsung_dsim_pm_ops,
+ .of_match_table = samsung_dsim_of_match,
+ },
+};
+
+module_platform_driver(samsung_dsim_driver);
+
+MODULE_AUTHOR("Jagan Teki <jagan@amarulasolutions.com>");
+MODULE_DESCRIPTION("Samsung MIPI DSIM controller bridge");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 8606876f7233..d4d2a2ce40f8 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1511,6 +1511,41 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
+/*
+ * For atomic updates which touch just a single CRTC, calculate the time of the
+ * next vblank, and inform all the fences of the deadline.
+ */
+static void set_fence_deadline(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state;
+ ktime_t vbltime = 0;
+ int i;
+
+ for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
+ ktime_t v;
+
+ if (drm_crtc_next_vblank_start(crtc, &v))
+ continue;
+
+ if (!vbltime || ktime_before(v, vbltime))
+ vbltime = v;
+ }
+
+ /* If no CRTCs updated, then nothing to do: */
+ if (!vbltime)
+ return;
+
+ for_each_new_plane_in_state (state, plane, new_plane_state, i) {
+ if (!new_plane_state->fence)
+ continue;
+ dma_fence_set_deadline(new_plane_state->fence, vbltime);
+ }
+}
+
/**
* drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
* @dev: DRM device
@@ -1540,6 +1575,8 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
struct drm_plane_state *new_plane_state;
int i, ret;
+ set_fence_deadline(dev, state);
+
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
if (!new_plane_state->fence)
continue;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c18ec866678d..0454da505687 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2796,7 +2796,7 @@ u32 drm_edid_get_panel_id(struct i2c_adapter *adapter)
* the EDID then we'll just return 0.
*/
- base_block = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ base_block = kzalloc(EDID_LENGTH, GFP_KERNEL);
if (!base_block)
return 0;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ee3e11e7177d..a6208e2c089b 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1381,10 +1381,13 @@ EXPORT_SYMBOL(drm_gem_lru_move_tail);
*
* @lru: The LRU to scan
* @nr_to_scan: The number of pages to try to reclaim
+ * @remaining: The number of pages left to reclaim, should be initialized by caller
* @shrink: Callback to try to shrink/reclaim the object.
*/
unsigned long
-drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+drm_gem_lru_scan(struct drm_gem_lru *lru,
+ unsigned int nr_to_scan,
+ unsigned long *remaining,
bool (*shrink)(struct drm_gem_object *obj))
{
struct drm_gem_lru still_in_lru;
@@ -1423,8 +1426,10 @@ drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
* hit shrinker in response to trying to get backing pages
* for this obj (ie. while it's lock is already held)
*/
- if (!dma_resv_trylock(obj->resv))
+ if (!dma_resv_trylock(obj->resv)) {
+ *remaining += obj->size >> PAGE_SHIFT;
goto tail;
+ }
if (shrink(obj)) {
freed += obj->size >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 9b0d540ff4a8..4ea6507a77e5 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -623,11 +623,14 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
int ret;
if (obj->import_attach) {
- /* Drop the reference drm_gem_mmap_obj() acquired.*/
- drm_gem_object_put(obj);
vma->vm_private_data = NULL;
+ ret = dma_buf_mmap(obj->dma_buf, vma, 0);
+
+ /* Drop the reference drm_gem_mmap_obj() acquired.*/
+ if (!ret)
+ drm_gem_object_put(obj);
- return dma_buf_mmap(obj->dma_buf, vma, 0);
+ return ret;
}
ret = drm_gem_shmem_get_pages(shmem);
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 5522d610c5cf..b1a38e6ce2f8 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -328,10 +328,17 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
- }, { /* Lenovo Yoga Book X90F / X91F / X91L */
+ }, { /* Lenovo Yoga Book X90F / X90L */
.matches = {
- /* Non exact match to match all versions */
- DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"),
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Lenovo Yoga Book X91F / X91L */
+ .matches = {
+ /* Non exact match to match F + L versions */
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Lenovo Yoga Tablet 2 830F / 830L */
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 2ff31717a3de..299fa2a19a90 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -844,10 +844,9 @@ bool drm_crtc_vblank_helper_get_vblank_timestamp(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp);
/**
- * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
- * vblank interval
- * @dev: DRM device
- * @pipe: index of CRTC whose vblank timestamp to retrieve
+ * drm_crtc_get_last_vbltimestamp - retrieve raw timestamp for the most
+ * recent vblank interval
+ * @crtc: CRTC whose vblank timestamp to retrieve
* @tvblank: Pointer to target time which should receive the timestamp
* @in_vblank_irq:
* True when called from drm_crtc_handle_vblank(). Some drivers
@@ -865,10 +864,9 @@ EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp);
* True if timestamp is considered to be very precise, false otherwise.
*/
static bool
-drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
- ktime_t *tvblank, bool in_vblank_irq)
+drm_crtc_get_last_vbltimestamp(struct drm_crtc *crtc, ktime_t *tvblank,
+ bool in_vblank_irq)
{
- struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
bool ret = false;
/* Define requested maximum error on timestamps (nanoseconds). */
@@ -876,8 +874,6 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
/* Query driver if possible and precision timestamping enabled. */
if (crtc && crtc->funcs->get_vblank_timestamp && max_error > 0) {
- struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
-
ret = crtc->funcs->get_vblank_timestamp(crtc, &max_error,
tvblank, in_vblank_irq);
}
@@ -891,6 +887,15 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
return ret;
}
+static bool
+drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
+ ktime_t *tvblank, bool in_vblank_irq)
+{
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
+
+ return drm_crtc_get_last_vbltimestamp(crtc, tvblank, in_vblank_irq);
+}
+
/**
* drm_crtc_vblank_count - retrieve "cooked" vblank counter value
* @crtc: which counter to retrieve
@@ -980,6 +985,36 @@ u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
}
EXPORT_SYMBOL(drm_crtc_vblank_count_and_time);
+/**
+ * drm_crtc_next_vblank_start - calculate the time of the next vblank
+ * @crtc: the crtc for which to calculate next vblank time
+ * @vblanktime: pointer to time to receive the next vblank timestamp.
+ *
+ * Calculate the expected time of the start of the next vblank period,
+ * based on the time of the previous vblank and the frame duration.
+ */
+int drm_crtc_next_vblank_start(struct drm_crtc *crtc, ktime_t *vblanktime)
+{
+ unsigned int pipe = drm_crtc_index(crtc);
+ struct drm_vblank_crtc *vblank = &crtc->dev->vblank[pipe];
+ struct drm_display_mode *mode = &vblank->hwmode;
+ u64 vblank_start;
+
+ if (!vblank->framedur_ns || !vblank->linedur_ns)
+ return -EINVAL;
+
+ if (!drm_crtc_get_last_vbltimestamp(crtc, vblanktime, false))
+ return -EINVAL;
+
+ vblank_start = DIV_ROUND_DOWN_ULL(
+ (u64)vblank->framedur_ns * mode->crtc_vblank_start,
+ mode->crtc_vtotal);
+ *vblanktime = ktime_add(*vblanktime, ns_to_ktime(vblank_start));
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_crtc_next_vblank_start);
+
static void send_vblank_event(struct drm_device *dev,
struct drm_pending_vblank_event *e,
u64 seq, ktime_t now)
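A hedged usage sketch for the drm_crtc_next_vblank_start() helper added above: a driver can use the predicted vblank start as a deadline hint for fences blocking an update, which is the kind of consumer this export is aimed at. The example_hint_deadline() name is a placeholder, and the dma_fence_set_deadline() call assumes the dma-fence deadline API is available in this tree.

#include <linux/dma-fence.h>
#include <drm/drm_vblank.h>

/* Illustrative only; not taken from this patch. */
static void example_hint_deadline(struct drm_crtc *crtc,
				  struct dma_fence *fence)
{
	ktime_t next_vblank;

	/* Returns -EINVAL until vblank timestamping data is available. */
	if (drm_crtc_next_vblank_start(crtc, &next_vblank))
		return;

	/* Ask the fence signaller to finish before the next scanout. */
	dma_fence_set_deadline(fence, next_vblank);
}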
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 4049fa4273ab..0cb92d651ff1 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -59,6 +59,7 @@ config DRM_EXYNOS_DSI
depends on DRM_EXYNOS_FIMD || DRM_EXYNOS5433_DECON || DRM_EXYNOS7_DECON
select DRM_MIPI_DSI
select DRM_PANEL
+ select DRM_SAMSUNG_DSIM
default n
help
This enables support for Exynos MIPI-DSI device.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 06d6513ddaae..fc81f728e6ba 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1,1521 +1,56 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Samsung SoC MIPI DSI Master driver.
+ * Samsung MIPI DSIM glue for Exynos SoCs.
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd
*
* Contacts: Tomasz Figa <t.figa@samsung.com>
-*/
+ */
-#include <linux/clk.h>
-#include <linux/delay.h>
#include <linux/component.h>
-#include <linux/gpio/consumer.h>
-#include <linux/irq.h>
#include <linux/of_device.h>
-#include <linux/of_graph.h>
-#include <linux/phy/phy.h>
-#include <linux/regulator/consumer.h>
-
-#include <asm/unaligned.h>
-#include <video/mipi_display.h>
-#include <video/videomode.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_bridge.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
+#include <drm/bridge/samsung-dsim.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
-/* returns true iff both arguments logically differs */
-#define NEQV(a, b) (!(a) ^ !(b))
-
-/* DSIM_STATUS */
-#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0)
-#define DSIM_STOP_STATE_CLK (1 << 8)
-#define DSIM_TX_READY_HS_CLK (1 << 10)
-#define DSIM_PLL_STABLE (1 << 31)
-
-/* DSIM_SWRST */
-#define DSIM_FUNCRST (1 << 16)
-#define DSIM_SWRST (1 << 0)
-
-/* DSIM_TIMEOUT */
-#define DSIM_LPDR_TIMEOUT(x) ((x) << 0)
-#define DSIM_BTA_TIMEOUT(x) ((x) << 16)
-
-/* DSIM_CLKCTRL */
-#define DSIM_ESC_PRESCALER(x) (((x) & 0xffff) << 0)
-#define DSIM_ESC_PRESCALER_MASK (0xffff << 0)
-#define DSIM_LANE_ESC_CLK_EN_CLK (1 << 19)
-#define DSIM_LANE_ESC_CLK_EN_DATA(x) (((x) & 0xf) << 20)
-#define DSIM_LANE_ESC_CLK_EN_DATA_MASK (0xf << 20)
-#define DSIM_BYTE_CLKEN (1 << 24)
-#define DSIM_BYTE_CLK_SRC(x) (((x) & 0x3) << 25)
-#define DSIM_BYTE_CLK_SRC_MASK (0x3 << 25)
-#define DSIM_PLL_BYPASS (1 << 27)
-#define DSIM_ESC_CLKEN (1 << 28)
-#define DSIM_TX_REQUEST_HSCLK (1 << 31)
-
-/* DSIM_CONFIG */
-#define DSIM_LANE_EN_CLK (1 << 0)
-#define DSIM_LANE_EN(x) (((x) & 0xf) << 1)
-#define DSIM_NUM_OF_DATA_LANE(x) (((x) & 0x3) << 5)
-#define DSIM_SUB_PIX_FORMAT(x) (((x) & 0x7) << 8)
-#define DSIM_MAIN_PIX_FORMAT_MASK (0x7 << 12)
-#define DSIM_MAIN_PIX_FORMAT_RGB888 (0x7 << 12)
-#define DSIM_MAIN_PIX_FORMAT_RGB666 (0x6 << 12)
-#define DSIM_MAIN_PIX_FORMAT_RGB666_P (0x5 << 12)
-#define DSIM_MAIN_PIX_FORMAT_RGB565 (0x4 << 12)
-#define DSIM_SUB_VC (((x) & 0x3) << 16)
-#define DSIM_MAIN_VC (((x) & 0x3) << 18)
-#define DSIM_HSA_DISABLE_MODE (1 << 20)
-#define DSIM_HBP_DISABLE_MODE (1 << 21)
-#define DSIM_HFP_DISABLE_MODE (1 << 22)
-/*
- * The i.MX 8M Mini Applications Processor Reference Manual,
- * Rev. 3, 11/2020 Page 4091
- * The i.MX 8M Nano Applications Processor Reference Manual,
- * Rev. 2, 07/2022 Page 3058
- * The i.MX 8M Plus Applications Processor Reference Manual,
- * Rev. 1, 06/2021 Page 5436
- * named this bit as 'HseDisableMode' but the bit definition
- * is quite opposite like
- * 0 = Disables transfer
- * 1 = Enables transfer
- * which clearly states that HSE is not a disable bit.
- *
- * This bit is named as per the manual even though it is not
- * a disable bit however the driver logic for handling HSE
- * is based on the MIPI_DSI_MODE_VIDEO_HSE flag itself.
- */
-#define DSIM_HSE_DISABLE_MODE (1 << 23)
-#define DSIM_AUTO_MODE (1 << 24)
-#define DSIM_VIDEO_MODE (1 << 25)
-#define DSIM_BURST_MODE (1 << 26)
-#define DSIM_SYNC_INFORM (1 << 27)
-#define DSIM_EOT_DISABLE (1 << 28)
-#define DSIM_MFLUSH_VS (1 << 29)
-/* This flag is valid only for exynos3250/3472/5260/5430 */
-#define DSIM_CLKLANE_STOP (1 << 30)
-
-/* DSIM_ESCMODE */
-#define DSIM_TX_TRIGGER_RST (1 << 4)
-#define DSIM_TX_LPDT_LP (1 << 6)
-#define DSIM_CMD_LPDT_LP (1 << 7)
-#define DSIM_FORCE_BTA (1 << 16)
-#define DSIM_FORCE_STOP_STATE (1 << 20)
-#define DSIM_STOP_STATE_CNT(x) (((x) & 0x7ff) << 21)
-#define DSIM_STOP_STATE_CNT_MASK (0x7ff << 21)
-
-/* DSIM_MDRESOL */
-#define DSIM_MAIN_STAND_BY (1 << 31)
-#define DSIM_MAIN_VRESOL(x, num_bits) (((x) & ((1 << (num_bits)) - 1)) << 16)
-#define DSIM_MAIN_HRESOL(x, num_bits) (((x) & ((1 << (num_bits)) - 1)) << 0)
-
-/* DSIM_MVPORCH */
-#define DSIM_CMD_ALLOW(x) ((x) << 28)
-#define DSIM_STABLE_VFP(x) ((x) << 16)
-#define DSIM_MAIN_VBP(x) ((x) << 0)
-#define DSIM_CMD_ALLOW_MASK (0xf << 28)
-#define DSIM_STABLE_VFP_MASK (0x7ff << 16)
-#define DSIM_MAIN_VBP_MASK (0x7ff << 0)
-
-/* DSIM_MHPORCH */
-#define DSIM_MAIN_HFP(x) ((x) << 16)
-#define DSIM_MAIN_HBP(x) ((x) << 0)
-#define DSIM_MAIN_HFP_MASK ((0xffff) << 16)
-#define DSIM_MAIN_HBP_MASK ((0xffff) << 0)
-
-/* DSIM_MSYNC */
-#define DSIM_MAIN_VSA(x) ((x) << 22)
-#define DSIM_MAIN_HSA(x) ((x) << 0)
-#define DSIM_MAIN_VSA_MASK ((0x3ff) << 22)
-#define DSIM_MAIN_HSA_MASK ((0xffff) << 0)
-
-/* DSIM_SDRESOL */
-#define DSIM_SUB_STANDY(x) ((x) << 31)
-#define DSIM_SUB_VRESOL(x) ((x) << 16)
-#define DSIM_SUB_HRESOL(x) ((x) << 0)
-#define DSIM_SUB_STANDY_MASK ((0x1) << 31)
-#define DSIM_SUB_VRESOL_MASK ((0x7ff) << 16)
-#define DSIM_SUB_HRESOL_MASK ((0x7ff) << 0)
-
-/* DSIM_INTSRC */
-#define DSIM_INT_PLL_STABLE (1 << 31)
-#define DSIM_INT_SW_RST_RELEASE (1 << 30)
-#define DSIM_INT_SFR_FIFO_EMPTY (1 << 29)
-#define DSIM_INT_SFR_HDR_FIFO_EMPTY (1 << 28)
-#define DSIM_INT_BTA (1 << 25)
-#define DSIM_INT_FRAME_DONE (1 << 24)
-#define DSIM_INT_RX_TIMEOUT (1 << 21)
-#define DSIM_INT_BTA_TIMEOUT (1 << 20)
-#define DSIM_INT_RX_DONE (1 << 18)
-#define DSIM_INT_RX_TE (1 << 17)
-#define DSIM_INT_RX_ACK (1 << 16)
-#define DSIM_INT_RX_ECC_ERR (1 << 15)
-#define DSIM_INT_RX_CRC_ERR (1 << 14)
-
-/* DSIM_FIFOCTRL */
-#define DSIM_RX_DATA_FULL (1 << 25)
-#define DSIM_RX_DATA_EMPTY (1 << 24)
-#define DSIM_SFR_HEADER_FULL (1 << 23)
-#define DSIM_SFR_HEADER_EMPTY (1 << 22)
-#define DSIM_SFR_PAYLOAD_FULL (1 << 21)
-#define DSIM_SFR_PAYLOAD_EMPTY (1 << 20)
-#define DSIM_I80_HEADER_FULL (1 << 19)
-#define DSIM_I80_HEADER_EMPTY (1 << 18)
-#define DSIM_I80_PAYLOAD_FULL (1 << 17)
-#define DSIM_I80_PAYLOAD_EMPTY (1 << 16)
-#define DSIM_SD_HEADER_FULL (1 << 15)
-#define DSIM_SD_HEADER_EMPTY (1 << 14)
-#define DSIM_SD_PAYLOAD_FULL (1 << 13)
-#define DSIM_SD_PAYLOAD_EMPTY (1 << 12)
-#define DSIM_MD_HEADER_FULL (1 << 11)
-#define DSIM_MD_HEADER_EMPTY (1 << 10)
-#define DSIM_MD_PAYLOAD_FULL (1 << 9)
-#define DSIM_MD_PAYLOAD_EMPTY (1 << 8)
-#define DSIM_RX_FIFO (1 << 4)
-#define DSIM_SFR_FIFO (1 << 3)
-#define DSIM_I80_FIFO (1 << 2)
-#define DSIM_SD_FIFO (1 << 1)
-#define DSIM_MD_FIFO (1 << 0)
-
-/* DSIM_PHYACCHR */
-#define DSIM_AFC_EN (1 << 14)
-#define DSIM_AFC_CTL(x) (((x) & 0x7) << 5)
-
-/* DSIM_PLLCTRL */
-#define DSIM_FREQ_BAND(x) ((x) << 24)
-#define DSIM_PLL_EN (1 << 23)
-#define DSIM_PLL_P(x) ((x) << 13)
-#define DSIM_PLL_M(x) ((x) << 4)
-#define DSIM_PLL_S(x) ((x) << 1)
-
-/* DSIM_PHYCTRL */
-#define DSIM_PHYCTRL_ULPS_EXIT(x) (((x) & 0x1ff) << 0)
-#define DSIM_PHYCTRL_B_DPHYCTL_VREG_LP (1 << 30)
-#define DSIM_PHYCTRL_B_DPHYCTL_SLEW_UP (1 << 14)
-
-/* DSIM_PHYTIMING */
-#define DSIM_PHYTIMING_LPX(x) ((x) << 8)
-#define DSIM_PHYTIMING_HS_EXIT(x) ((x) << 0)
-
-/* DSIM_PHYTIMING1 */
-#define DSIM_PHYTIMING1_CLK_PREPARE(x) ((x) << 24)
-#define DSIM_PHYTIMING1_CLK_ZERO(x) ((x) << 16)
-#define DSIM_PHYTIMING1_CLK_POST(x) ((x) << 8)
-#define DSIM_PHYTIMING1_CLK_TRAIL(x) ((x) << 0)
-
-/* DSIM_PHYTIMING2 */
-#define DSIM_PHYTIMING2_HS_PREPARE(x) ((x) << 16)
-#define DSIM_PHYTIMING2_HS_ZERO(x) ((x) << 8)
-#define DSIM_PHYTIMING2_HS_TRAIL(x) ((x) << 0)
-
-#define DSI_MAX_BUS_WIDTH 4
-#define DSI_NUM_VIRTUAL_CHANNELS 4
-#define DSI_TX_FIFO_SIZE 2048
-#define DSI_RX_FIFO_SIZE 256
-#define DSI_XFER_TIMEOUT_MS 100
-#define DSI_RX_FIFO_EMPTY 0x30800002
-
-#define OLD_SCLK_MIPI_CLK_NAME "pll_clk"
-
-static const char *const clk_names[5] = { "bus_clk", "sclk_mipi",
- "phyclk_mipidphy0_bitclkdiv8", "phyclk_mipidphy0_rxclkesc0",
- "sclk_rgb_vclk_to_dsim0" };
-
-enum exynos_dsi_transfer_type {
- EXYNOS_DSI_TX,
- EXYNOS_DSI_RX,
-};
-
-struct exynos_dsi_transfer {
- struct list_head list;
- struct completion completed;
- int result;
- struct mipi_dsi_packet packet;
- u16 flags;
- u16 tx_done;
-
- u8 *rx_payload;
- u16 rx_len;
- u16 rx_done;
-};
-
-#define DSIM_STATE_ENABLED BIT(0)
-#define DSIM_STATE_INITIALIZED BIT(1)
-#define DSIM_STATE_CMD_LPM BIT(2)
-#define DSIM_STATE_VIDOUT_AVAILABLE BIT(3)
-
-struct exynos_dsi_driver_data {
- const unsigned int *reg_ofs;
- unsigned int plltmr_reg;
- unsigned int has_freqband:1;
- unsigned int has_clklane_stop:1;
- unsigned int num_clks;
- unsigned int max_freq;
- unsigned int wait_for_reset;
- unsigned int num_bits_resol;
- const unsigned int *reg_values;
-};
-
struct exynos_dsi {
struct drm_encoder encoder;
- struct mipi_dsi_host dsi_host;
- struct drm_bridge bridge;
- struct drm_bridge *out_bridge;
- struct device *dev;
- struct drm_display_mode mode;
-
- void __iomem *reg_base;
- struct phy *phy;
- struct clk **clks;
- struct regulator_bulk_data supplies[2];
- int irq;
- struct gpio_desc *te_gpio;
-
- u32 pll_clk_rate;
- u32 burst_clk_rate;
- u32 esc_clk_rate;
- u32 lanes;
- u32 mode_flags;
- u32 format;
-
- int state;
- struct drm_property *brightness;
- struct completion completed;
-
- spinlock_t transfer_lock; /* protects transfer_list */
- struct list_head transfer_list;
-
- const struct exynos_dsi_driver_data *driver_data;
-};
-
-#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
-
-static inline struct exynos_dsi *bridge_to_dsi(struct drm_bridge *b)
-{
- return container_of(b, struct exynos_dsi, bridge);
-}
-
-enum reg_idx {
- DSIM_STATUS_REG, /* Status register */
- DSIM_SWRST_REG, /* Software reset register */
- DSIM_CLKCTRL_REG, /* Clock control register */
- DSIM_TIMEOUT_REG, /* Time out register */
- DSIM_CONFIG_REG, /* Configuration register */
- DSIM_ESCMODE_REG, /* Escape mode register */
- DSIM_MDRESOL_REG,
- DSIM_MVPORCH_REG, /* Main display Vporch register */
- DSIM_MHPORCH_REG, /* Main display Hporch register */
- DSIM_MSYNC_REG, /* Main display sync area register */
- DSIM_INTSRC_REG, /* Interrupt source register */
- DSIM_INTMSK_REG, /* Interrupt mask register */
- DSIM_PKTHDR_REG, /* Packet Header FIFO register */
- DSIM_PAYLOAD_REG, /* Payload FIFO register */
- DSIM_RXFIFO_REG, /* Read FIFO register */
- DSIM_FIFOCTRL_REG, /* FIFO status and control register */
- DSIM_PLLCTRL_REG, /* PLL control register */
- DSIM_PHYCTRL_REG,
- DSIM_PHYTIMING_REG,
- DSIM_PHYTIMING1_REG,
- DSIM_PHYTIMING2_REG,
- NUM_REGS
-};
-
-static inline void exynos_dsi_write(struct exynos_dsi *dsi, enum reg_idx idx,
- u32 val)
-{
-
- writel(val, dsi->reg_base + dsi->driver_data->reg_ofs[idx]);
-}
-
-static inline u32 exynos_dsi_read(struct exynos_dsi *dsi, enum reg_idx idx)
-{
- return readl(dsi->reg_base + dsi->driver_data->reg_ofs[idx]);
-}
-
-static const unsigned int exynos_reg_ofs[] = {
- [DSIM_STATUS_REG] = 0x00,
- [DSIM_SWRST_REG] = 0x04,
- [DSIM_CLKCTRL_REG] = 0x08,
- [DSIM_TIMEOUT_REG] = 0x0c,
- [DSIM_CONFIG_REG] = 0x10,
- [DSIM_ESCMODE_REG] = 0x14,
- [DSIM_MDRESOL_REG] = 0x18,
- [DSIM_MVPORCH_REG] = 0x1c,
- [DSIM_MHPORCH_REG] = 0x20,
- [DSIM_MSYNC_REG] = 0x24,
- [DSIM_INTSRC_REG] = 0x2c,
- [DSIM_INTMSK_REG] = 0x30,
- [DSIM_PKTHDR_REG] = 0x34,
- [DSIM_PAYLOAD_REG] = 0x38,
- [DSIM_RXFIFO_REG] = 0x3c,
- [DSIM_FIFOCTRL_REG] = 0x44,
- [DSIM_PLLCTRL_REG] = 0x4c,
- [DSIM_PHYCTRL_REG] = 0x5c,
- [DSIM_PHYTIMING_REG] = 0x64,
- [DSIM_PHYTIMING1_REG] = 0x68,
- [DSIM_PHYTIMING2_REG] = 0x6c,
-};
-
-static const unsigned int exynos5433_reg_ofs[] = {
- [DSIM_STATUS_REG] = 0x04,
- [DSIM_SWRST_REG] = 0x0C,
- [DSIM_CLKCTRL_REG] = 0x10,
- [DSIM_TIMEOUT_REG] = 0x14,
- [DSIM_CONFIG_REG] = 0x18,
- [DSIM_ESCMODE_REG] = 0x1C,
- [DSIM_MDRESOL_REG] = 0x20,
- [DSIM_MVPORCH_REG] = 0x24,
- [DSIM_MHPORCH_REG] = 0x28,
- [DSIM_MSYNC_REG] = 0x2C,
- [DSIM_INTSRC_REG] = 0x34,
- [DSIM_INTMSK_REG] = 0x38,
- [DSIM_PKTHDR_REG] = 0x3C,
- [DSIM_PAYLOAD_REG] = 0x40,
- [DSIM_RXFIFO_REG] = 0x44,
- [DSIM_FIFOCTRL_REG] = 0x4C,
- [DSIM_PLLCTRL_REG] = 0x94,
- [DSIM_PHYCTRL_REG] = 0xA4,
- [DSIM_PHYTIMING_REG] = 0xB4,
- [DSIM_PHYTIMING1_REG] = 0xB8,
- [DSIM_PHYTIMING2_REG] = 0xBC,
-};
-
-enum reg_value_idx {
- RESET_TYPE,
- PLL_TIMER,
- STOP_STATE_CNT,
- PHYCTRL_ULPS_EXIT,
- PHYCTRL_VREG_LP,
- PHYCTRL_SLEW_UP,
- PHYTIMING_LPX,
- PHYTIMING_HS_EXIT,
- PHYTIMING_CLK_PREPARE,
- PHYTIMING_CLK_ZERO,
- PHYTIMING_CLK_POST,
- PHYTIMING_CLK_TRAIL,
- PHYTIMING_HS_PREPARE,
- PHYTIMING_HS_ZERO,
- PHYTIMING_HS_TRAIL
-};
-
-static const unsigned int reg_values[] = {
- [RESET_TYPE] = DSIM_SWRST,
- [PLL_TIMER] = 500,
- [STOP_STATE_CNT] = 0xf,
- [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x0af),
- [PHYCTRL_VREG_LP] = 0,
- [PHYCTRL_SLEW_UP] = 0,
- [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x06),
- [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0b),
- [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x07),
- [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x27),
- [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0d),
- [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x08),
- [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x09),
- [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x0d),
- [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0b),
-};
-
-static const unsigned int exynos5422_reg_values[] = {
- [RESET_TYPE] = DSIM_SWRST,
- [PLL_TIMER] = 500,
- [STOP_STATE_CNT] = 0xf,
- [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0xaf),
- [PHYCTRL_VREG_LP] = 0,
- [PHYCTRL_SLEW_UP] = 0,
- [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x08),
- [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0d),
- [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x09),
- [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x30),
- [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0e),
- [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x0a),
- [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x0c),
- [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x11),
- [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0d),
-};
-
-static const unsigned int exynos5433_reg_values[] = {
- [RESET_TYPE] = DSIM_FUNCRST,
- [PLL_TIMER] = 22200,
- [STOP_STATE_CNT] = 0xa,
- [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x190),
- [PHYCTRL_VREG_LP] = DSIM_PHYCTRL_B_DPHYCTL_VREG_LP,
- [PHYCTRL_SLEW_UP] = DSIM_PHYCTRL_B_DPHYCTL_SLEW_UP,
- [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x07),
- [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0c),
- [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x09),
- [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x2d),
- [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0e),
- [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x09),
- [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x0b),
- [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x10),
- [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0c),
-};
-
-static const struct exynos_dsi_driver_data exynos3_dsi_driver_data = {
- .reg_ofs = exynos_reg_ofs,
- .plltmr_reg = 0x50,
- .has_freqband = 1,
- .has_clklane_stop = 1,
- .num_clks = 2,
- .max_freq = 1000,
- .wait_for_reset = 1,
- .num_bits_resol = 11,
- .reg_values = reg_values,
};
-static const struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
- .reg_ofs = exynos_reg_ofs,
- .plltmr_reg = 0x50,
- .has_freqband = 1,
- .has_clklane_stop = 1,
- .num_clks = 2,
- .max_freq = 1000,
- .wait_for_reset = 1,
- .num_bits_resol = 11,
- .reg_values = reg_values,
-};
-
-static const struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
- .reg_ofs = exynos_reg_ofs,
- .plltmr_reg = 0x58,
- .num_clks = 2,
- .max_freq = 1000,
- .wait_for_reset = 1,
- .num_bits_resol = 11,
- .reg_values = reg_values,
-};
-
-static const struct exynos_dsi_driver_data exynos5433_dsi_driver_data = {
- .reg_ofs = exynos5433_reg_ofs,
- .plltmr_reg = 0xa0,
- .has_clklane_stop = 1,
- .num_clks = 5,
- .max_freq = 1500,
- .wait_for_reset = 0,
- .num_bits_resol = 12,
- .reg_values = exynos5433_reg_values,
-};
-
-static const struct exynos_dsi_driver_data exynos5422_dsi_driver_data = {
- .reg_ofs = exynos5433_reg_ofs,
- .plltmr_reg = 0xa0,
- .has_clklane_stop = 1,
- .num_clks = 2,
- .max_freq = 1500,
- .wait_for_reset = 1,
- .num_bits_resol = 12,
- .reg_values = exynos5422_reg_values,
-};
-
-static const struct of_device_id exynos_dsi_of_match[] = {
- { .compatible = "samsung,exynos3250-mipi-dsi",
- .data = &exynos3_dsi_driver_data },
- { .compatible = "samsung,exynos4210-mipi-dsi",
- .data = &exynos4_dsi_driver_data },
- { .compatible = "samsung,exynos5410-mipi-dsi",
- .data = &exynos5_dsi_driver_data },
- { .compatible = "samsung,exynos5422-mipi-dsi",
- .data = &exynos5422_dsi_driver_data },
- { .compatible = "samsung,exynos5433-mipi-dsi",
- .data = &exynos5433_dsi_driver_data },
- { }
-};
-
-static void exynos_dsi_wait_for_reset(struct exynos_dsi *dsi)
-{
- if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300)))
- return;
-
- dev_err(dsi->dev, "timeout waiting for reset\n");
-}
-
-static void exynos_dsi_reset(struct exynos_dsi *dsi)
-{
- u32 reset_val = dsi->driver_data->reg_values[RESET_TYPE];
-
- reinit_completion(&dsi->completed);
- exynos_dsi_write(dsi, DSIM_SWRST_REG, reset_val);
-}
-
-#ifndef MHZ
-#define MHZ (1000*1000)
-#endif
-
-static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi,
- unsigned long fin, unsigned long fout, u8 *p, u16 *m, u8 *s)
-{
- const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
- unsigned long best_freq = 0;
- u32 min_delta = 0xffffffff;
- u8 p_min, p_max;
- u8 _p, best_p;
- u16 _m, best_m;
- u8 _s, best_s;
-
- p_min = DIV_ROUND_UP(fin, (12 * MHZ));
- p_max = fin / (6 * MHZ);
-
- for (_p = p_min; _p <= p_max; ++_p) {
- for (_s = 0; _s <= 5; ++_s) {
- u64 tmp;
- u32 delta;
-
- tmp = (u64)fout * (_p << _s);
- do_div(tmp, fin);
- _m = tmp;
- if (_m < 41 || _m > 125)
- continue;
-
- tmp = (u64)_m * fin;
- do_div(tmp, _p);
- if (tmp < 500 * MHZ ||
- tmp > driver_data->max_freq * MHZ)
- continue;
-
- tmp = (u64)_m * fin;
- do_div(tmp, _p << _s);
-
- delta = abs(fout - tmp);
- if (delta < min_delta) {
- best_p = _p;
- best_m = _m;
- best_s = _s;
- min_delta = delta;
- best_freq = tmp;
- }
- }
- }
-
- if (best_freq) {
- *p = best_p;
- *m = best_m;
- *s = best_s;
- }
-
- return best_freq;
-}
-
-static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
- unsigned long freq)
-{
- const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
- unsigned long fin, fout;
- int timeout;
- u8 p, s;
- u16 m;
- u32 reg;
-
- fin = dsi->pll_clk_rate;
- fout = exynos_dsi_pll_find_pms(dsi, fin, freq, &p, &m, &s);
- if (!fout) {
- dev_err(dsi->dev,
- "failed to find PLL PMS for requested frequency\n");
- return 0;
- }
- dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s);
-
- writel(driver_data->reg_values[PLL_TIMER],
- dsi->reg_base + driver_data->plltmr_reg);
-
- reg = DSIM_PLL_EN | DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s);
-
- if (driver_data->has_freqband) {
- static const unsigned long freq_bands[] = {
- 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
- 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
- 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
- 770 * MHZ, 870 * MHZ, 950 * MHZ,
- };
- int band;
-
- for (band = 0; band < ARRAY_SIZE(freq_bands); ++band)
- if (fout < freq_bands[band])
- break;
-
- dev_dbg(dsi->dev, "band %d\n", band);
-
- reg |= DSIM_FREQ_BAND(band);
- }
-
- exynos_dsi_write(dsi, DSIM_PLLCTRL_REG, reg);
-
- timeout = 1000;
- do {
- if (timeout-- == 0) {
- dev_err(dsi->dev, "PLL failed to stabilize\n");
- return 0;
- }
- reg = exynos_dsi_read(dsi, DSIM_STATUS_REG);
- } while ((reg & DSIM_PLL_STABLE) == 0);
-
- return fout;
-}
-
-static int exynos_dsi_enable_clock(struct exynos_dsi *dsi)
-{
- unsigned long hs_clk, byte_clk, esc_clk;
- unsigned long esc_div;
- u32 reg;
-
- hs_clk = exynos_dsi_set_pll(dsi, dsi->burst_clk_rate);
- if (!hs_clk) {
- dev_err(dsi->dev, "failed to configure DSI PLL\n");
- return -EFAULT;
- }
-
- byte_clk = hs_clk / 8;
- esc_div = DIV_ROUND_UP(byte_clk, dsi->esc_clk_rate);
- esc_clk = byte_clk / esc_div;
-
- if (esc_clk > 20 * MHZ) {
- ++esc_div;
- esc_clk = byte_clk / esc_div;
- }
-
- dev_dbg(dsi->dev, "hs_clk = %lu, byte_clk = %lu, esc_clk = %lu\n",
- hs_clk, byte_clk, esc_clk);
-
- reg = exynos_dsi_read(dsi, DSIM_CLKCTRL_REG);
- reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK
- | DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS
- | DSIM_BYTE_CLK_SRC_MASK);
- reg |= DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN
- | DSIM_ESC_PRESCALER(esc_div)
- | DSIM_LANE_ESC_CLK_EN_CLK
- | DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1)
- | DSIM_BYTE_CLK_SRC(0)
- | DSIM_TX_REQUEST_HSCLK;
- exynos_dsi_write(dsi, DSIM_CLKCTRL_REG, reg);
-
- return 0;
-}
-
-static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi)
-{
- const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
- const unsigned int *reg_values = driver_data->reg_values;
- u32 reg;
-
- if (driver_data->has_freqband)
- return;
-
- /* B D-PHY: D-PHY Master & Slave Analog Block control */
- reg = reg_values[PHYCTRL_ULPS_EXIT] | reg_values[PHYCTRL_VREG_LP] |
- reg_values[PHYCTRL_SLEW_UP];
- exynos_dsi_write(dsi, DSIM_PHYCTRL_REG, reg);
-
- /*
- * T LPX: Transmitted length of any Low-Power state period
- * T HS-EXIT: Time that the transmitter drives LP-11 following a HS
- * burst
- */
- reg = reg_values[PHYTIMING_LPX] | reg_values[PHYTIMING_HS_EXIT];
- exynos_dsi_write(dsi, DSIM_PHYTIMING_REG, reg);
-
- /*
- * T CLK-PREPARE: Time that the transmitter drives the Clock Lane LP-00
- * Line state immediately before the HS-0 Line state starting the
- * HS transmission
- * T CLK-ZERO: Time that the transmitter drives the HS-0 state prior to
- * transmitting the Clock.
- * T CLK_POST: Time that the transmitter continues to send HS clock
- * after the last associated Data Lane has transitioned to LP Mode
- * Interval is defined as the period from the end of T HS-TRAIL to
- * the beginning of T CLK-TRAIL
- * T CLK-TRAIL: Time that the transmitter drives the HS-0 state after
- * the last payload clock bit of a HS transmission burst
- */
- reg = reg_values[PHYTIMING_CLK_PREPARE] |
- reg_values[PHYTIMING_CLK_ZERO] |
- reg_values[PHYTIMING_CLK_POST] |
- reg_values[PHYTIMING_CLK_TRAIL];
-
- exynos_dsi_write(dsi, DSIM_PHYTIMING1_REG, reg);
-
- /*
- * T HS-PREPARE: Time that the transmitter drives the Data Lane LP-00
- * Line state immediately before the HS-0 Line state starting the
- * HS transmission
- * T HS-ZERO: Time that the transmitter drives the HS-0 state prior to
- * transmitting the Sync sequence.
- * T HS-TRAIL: Time that the transmitter drives the flipped differential
- * state after last payload data bit of a HS transmission burst
- */
- reg = reg_values[PHYTIMING_HS_PREPARE] | reg_values[PHYTIMING_HS_ZERO] |
- reg_values[PHYTIMING_HS_TRAIL];
- exynos_dsi_write(dsi, DSIM_PHYTIMING2_REG, reg);
-}
-
-static void exynos_dsi_disable_clock(struct exynos_dsi *dsi)
-{
- u32 reg;
-
- reg = exynos_dsi_read(dsi, DSIM_CLKCTRL_REG);
- reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK
- | DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN);
- exynos_dsi_write(dsi, DSIM_CLKCTRL_REG, reg);
-
- reg = exynos_dsi_read(dsi, DSIM_PLLCTRL_REG);
- reg &= ~DSIM_PLL_EN;
- exynos_dsi_write(dsi, DSIM_PLLCTRL_REG, reg);
-}
-
-static void exynos_dsi_enable_lane(struct exynos_dsi *dsi, u32 lane)
-{
- u32 reg = exynos_dsi_read(dsi, DSIM_CONFIG_REG);
- reg |= (DSIM_NUM_OF_DATA_LANE(dsi->lanes - 1) | DSIM_LANE_EN_CLK |
- DSIM_LANE_EN(lane));
- exynos_dsi_write(dsi, DSIM_CONFIG_REG, reg);
-}
-
-static int exynos_dsi_init_link(struct exynos_dsi *dsi)
-{
- const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
- int timeout;
- u32 reg;
- u32 lanes_mask;
-
- /* Initialize FIFO pointers */
- reg = exynos_dsi_read(dsi, DSIM_FIFOCTRL_REG);
- reg &= ~0x1f;
- exynos_dsi_write(dsi, DSIM_FIFOCTRL_REG, reg);
-
- usleep_range(9000, 11000);
-
- reg |= 0x1f;
- exynos_dsi_write(dsi, DSIM_FIFOCTRL_REG, reg);
- usleep_range(9000, 11000);
-
- /* DSI configuration */
- reg = 0;
-
- /*
- * The first bit of mode_flags specifies display configuration.
- * If this bit is set[= MIPI_DSI_MODE_VIDEO], dsi will support video
- * mode, otherwise it will support command mode.
- */
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
- reg |= DSIM_VIDEO_MODE;
-
- /*
- * The user manual describes that following bits are ignored in
- * command mode.
- */
- if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH))
- reg |= DSIM_MFLUSH_VS;
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
- reg |= DSIM_SYNC_INFORM;
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
- reg |= DSIM_BURST_MODE;
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_AUTO_VERT)
- reg |= DSIM_AUTO_MODE;
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSE)
- reg |= DSIM_HSE_DISABLE_MODE;
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HFP)
- reg |= DSIM_HFP_DISABLE_MODE;
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HBP)
- reg |= DSIM_HBP_DISABLE_MODE;
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HSA)
- reg |= DSIM_HSA_DISABLE_MODE;
- }
-
- if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
- reg |= DSIM_EOT_DISABLE;
-
- switch (dsi->format) {
- case MIPI_DSI_FMT_RGB888:
- reg |= DSIM_MAIN_PIX_FORMAT_RGB888;
- break;
- case MIPI_DSI_FMT_RGB666:
- reg |= DSIM_MAIN_PIX_FORMAT_RGB666;
- break;
- case MIPI_DSI_FMT_RGB666_PACKED:
- reg |= DSIM_MAIN_PIX_FORMAT_RGB666_P;
- break;
- case MIPI_DSI_FMT_RGB565:
- reg |= DSIM_MAIN_PIX_FORMAT_RGB565;
- break;
- default:
- dev_err(dsi->dev, "invalid pixel format\n");
- return -EINVAL;
- }
-
- /*
- * Use non-continuous clock mode if the periparal wants and
- * host controller supports
- *
- * In non-continous clock mode, host controller will turn off
- * the HS clock between high-speed transmissions to reduce
- * power consumption.
- */
- if (driver_data->has_clklane_stop &&
- dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
- reg |= DSIM_CLKLANE_STOP;
- }
- exynos_dsi_write(dsi, DSIM_CONFIG_REG, reg);
-
- lanes_mask = BIT(dsi->lanes) - 1;
- exynos_dsi_enable_lane(dsi, lanes_mask);
-
- /* Check clock and data lane state are stop state */
- timeout = 100;
- do {
- if (timeout-- == 0) {
- dev_err(dsi->dev, "waiting for bus lanes timed out\n");
- return -EFAULT;
- }
-
- reg = exynos_dsi_read(dsi, DSIM_STATUS_REG);
- if ((reg & DSIM_STOP_STATE_DAT(lanes_mask))
- != DSIM_STOP_STATE_DAT(lanes_mask))
- continue;
- } while (!(reg & (DSIM_STOP_STATE_CLK | DSIM_TX_READY_HS_CLK)));
-
- reg = exynos_dsi_read(dsi, DSIM_ESCMODE_REG);
- reg &= ~DSIM_STOP_STATE_CNT_MASK;
- reg |= DSIM_STOP_STATE_CNT(driver_data->reg_values[STOP_STATE_CNT]);
- exynos_dsi_write(dsi, DSIM_ESCMODE_REG, reg);
-
- reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff);
- exynos_dsi_write(dsi, DSIM_TIMEOUT_REG, reg);
-
- return 0;
-}
-
-static void exynos_dsi_set_display_mode(struct exynos_dsi *dsi)
-{
- struct drm_display_mode *m = &dsi->mode;
- unsigned int num_bits_resol = dsi->driver_data->num_bits_resol;
- u32 reg;
-
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
- reg = DSIM_CMD_ALLOW(0xf)
- | DSIM_STABLE_VFP(m->vsync_start - m->vdisplay)
- | DSIM_MAIN_VBP(m->vtotal - m->vsync_end);
- exynos_dsi_write(dsi, DSIM_MVPORCH_REG, reg);
-
- reg = DSIM_MAIN_HFP(m->hsync_start - m->hdisplay)
- | DSIM_MAIN_HBP(m->htotal - m->hsync_end);
- exynos_dsi_write(dsi, DSIM_MHPORCH_REG, reg);
-
- reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start)
- | DSIM_MAIN_HSA(m->hsync_end - m->hsync_start);
- exynos_dsi_write(dsi, DSIM_MSYNC_REG, reg);
- }
- reg = DSIM_MAIN_HRESOL(m->hdisplay, num_bits_resol) |
- DSIM_MAIN_VRESOL(m->vdisplay, num_bits_resol);
-
- exynos_dsi_write(dsi, DSIM_MDRESOL_REG, reg);
-
- dev_dbg(dsi->dev, "LCD size = %dx%d\n", m->hdisplay, m->vdisplay);
-}
-
-static void exynos_dsi_set_display_enable(struct exynos_dsi *dsi, bool enable)
-{
- u32 reg;
-
- reg = exynos_dsi_read(dsi, DSIM_MDRESOL_REG);
- if (enable)
- reg |= DSIM_MAIN_STAND_BY;
- else
- reg &= ~DSIM_MAIN_STAND_BY;
- exynos_dsi_write(dsi, DSIM_MDRESOL_REG, reg);
-}
-
-static int exynos_dsi_wait_for_hdr_fifo(struct exynos_dsi *dsi)
-{
- int timeout = 2000;
-
- do {
- u32 reg = exynos_dsi_read(dsi, DSIM_FIFOCTRL_REG);
-
- if (!(reg & DSIM_SFR_HEADER_FULL))
- return 0;
-
- if (!cond_resched())
- usleep_range(950, 1050);
- } while (--timeout);
-
- return -ETIMEDOUT;
-}
-
-static void exynos_dsi_set_cmd_lpm(struct exynos_dsi *dsi, bool lpm)
-{
- u32 v = exynos_dsi_read(dsi, DSIM_ESCMODE_REG);
-
- if (lpm)
- v |= DSIM_CMD_LPDT_LP;
- else
- v &= ~DSIM_CMD_LPDT_LP;
-
- exynos_dsi_write(dsi, DSIM_ESCMODE_REG, v);
-}
-
-static void exynos_dsi_force_bta(struct exynos_dsi *dsi)
-{
- u32 v = exynos_dsi_read(dsi, DSIM_ESCMODE_REG);
- v |= DSIM_FORCE_BTA;
- exynos_dsi_write(dsi, DSIM_ESCMODE_REG, v);
-}
-
-static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
- struct exynos_dsi_transfer *xfer)
-{
- struct device *dev = dsi->dev;
- struct mipi_dsi_packet *pkt = &xfer->packet;
- const u8 *payload = pkt->payload + xfer->tx_done;
- u16 length = pkt->payload_length - xfer->tx_done;
- bool first = !xfer->tx_done;
- u32 reg;
-
- dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n",
- xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done);
-
- if (length > DSI_TX_FIFO_SIZE)
- length = DSI_TX_FIFO_SIZE;
-
- xfer->tx_done += length;
-
- /* Send payload */
- while (length >= 4) {
- reg = get_unaligned_le32(payload);
- exynos_dsi_write(dsi, DSIM_PAYLOAD_REG, reg);
- payload += 4;
- length -= 4;
- }
-
- reg = 0;
- switch (length) {
- case 3:
- reg |= payload[2] << 16;
- fallthrough;
- case 2:
- reg |= payload[1] << 8;
- fallthrough;
- case 1:
- reg |= payload[0];
- exynos_dsi_write(dsi, DSIM_PAYLOAD_REG, reg);
- break;
- }
-
- /* Send packet header */
- if (!first)
- return;
-
- reg = get_unaligned_le32(pkt->header);
- if (exynos_dsi_wait_for_hdr_fifo(dsi)) {
- dev_err(dev, "waiting for header FIFO timed out\n");
- return;
- }
-
- if (NEQV(xfer->flags & MIPI_DSI_MSG_USE_LPM,
- dsi->state & DSIM_STATE_CMD_LPM)) {
- exynos_dsi_set_cmd_lpm(dsi, xfer->flags & MIPI_DSI_MSG_USE_LPM);
- dsi->state ^= DSIM_STATE_CMD_LPM;
- }
-
- exynos_dsi_write(dsi, DSIM_PKTHDR_REG, reg);
-
- if (xfer->flags & MIPI_DSI_MSG_REQ_ACK)
- exynos_dsi_force_bta(dsi);
-}
-
-static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi,
- struct exynos_dsi_transfer *xfer)
-{
- u8 *payload = xfer->rx_payload + xfer->rx_done;
- bool first = !xfer->rx_done;
- struct device *dev = dsi->dev;
- u16 length;
- u32 reg;
-
- if (first) {
- reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG);
-
- switch (reg & 0x3f) {
- case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
- case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
- if (xfer->rx_len >= 2) {
- payload[1] = reg >> 16;
- ++xfer->rx_done;
- }
- fallthrough;
- case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
- case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
- payload[0] = reg >> 8;
- ++xfer->rx_done;
- xfer->rx_len = xfer->rx_done;
- xfer->result = 0;
- goto clear_fifo;
- case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
- dev_err(dev, "DSI Error Report: 0x%04x\n",
- (reg >> 8) & 0xffff);
- xfer->result = 0;
- goto clear_fifo;
- }
-
- length = (reg >> 8) & 0xffff;
- if (length > xfer->rx_len) {
- dev_err(dev,
- "response too long (%u > %u bytes), stripping\n",
- xfer->rx_len, length);
- length = xfer->rx_len;
- } else if (length < xfer->rx_len)
- xfer->rx_len = length;
- }
-
- length = xfer->rx_len - xfer->rx_done;
- xfer->rx_done += length;
-
- /* Receive payload */
- while (length >= 4) {
- reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG);
- payload[0] = (reg >> 0) & 0xff;
- payload[1] = (reg >> 8) & 0xff;
- payload[2] = (reg >> 16) & 0xff;
- payload[3] = (reg >> 24) & 0xff;
- payload += 4;
- length -= 4;
- }
-
- if (length) {
- reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG);
- switch (length) {
- case 3:
- payload[2] = (reg >> 16) & 0xff;
- fallthrough;
- case 2:
- payload[1] = (reg >> 8) & 0xff;
- fallthrough;
- case 1:
- payload[0] = reg & 0xff;
- }
- }
-
- if (xfer->rx_done == xfer->rx_len)
- xfer->result = 0;
-
-clear_fifo:
- length = DSI_RX_FIFO_SIZE / 4;
- do {
- reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG);
- if (reg == DSI_RX_FIFO_EMPTY)
- break;
- } while (--length);
-}
-
-static void exynos_dsi_transfer_start(struct exynos_dsi *dsi)
-{
- unsigned long flags;
- struct exynos_dsi_transfer *xfer;
- bool start = false;
-
-again:
- spin_lock_irqsave(&dsi->transfer_lock, flags);
-
- if (list_empty(&dsi->transfer_list)) {
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
- return;
- }
-
- xfer = list_first_entry(&dsi->transfer_list,
- struct exynos_dsi_transfer, list);
-
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
-
- if (xfer->packet.payload_length &&
- xfer->tx_done == xfer->packet.payload_length)
- /* waiting for RX */
- return;
-
- exynos_dsi_send_to_fifo(dsi, xfer);
-
- if (xfer->packet.payload_length || xfer->rx_len)
- return;
-
- xfer->result = 0;
- complete(&xfer->completed);
-
- spin_lock_irqsave(&dsi->transfer_lock, flags);
-
- list_del_init(&xfer->list);
- start = !list_empty(&dsi->transfer_list);
-
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
-
- if (start)
- goto again;
-}
-
-static bool exynos_dsi_transfer_finish(struct exynos_dsi *dsi)
-{
- struct exynos_dsi_transfer *xfer;
- unsigned long flags;
- bool start = true;
-
- spin_lock_irqsave(&dsi->transfer_lock, flags);
-
- if (list_empty(&dsi->transfer_list)) {
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
- return false;
- }
-
- xfer = list_first_entry(&dsi->transfer_list,
- struct exynos_dsi_transfer, list);
-
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
-
- dev_dbg(dsi->dev,
- "> xfer %pK, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
- xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len,
- xfer->rx_done);
-
- if (xfer->tx_done != xfer->packet.payload_length)
- return true;
-
- if (xfer->rx_done != xfer->rx_len)
- exynos_dsi_read_from_fifo(dsi, xfer);
-
- if (xfer->rx_done != xfer->rx_len)
- return true;
-
- spin_lock_irqsave(&dsi->transfer_lock, flags);
-
- list_del_init(&xfer->list);
- start = !list_empty(&dsi->transfer_list);
-
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
-
- if (!xfer->rx_len)
- xfer->result = 0;
- complete(&xfer->completed);
-
- return start;
-}
-
-static void exynos_dsi_remove_transfer(struct exynos_dsi *dsi,
- struct exynos_dsi_transfer *xfer)
+static irqreturn_t exynos_dsi_te_irq_handler(struct samsung_dsim *dsim)
{
- unsigned long flags;
- bool start;
-
- spin_lock_irqsave(&dsi->transfer_lock, flags);
-
- if (!list_empty(&dsi->transfer_list) &&
- xfer == list_first_entry(&dsi->transfer_list,
- struct exynos_dsi_transfer, list)) {
- list_del_init(&xfer->list);
- start = !list_empty(&dsi->transfer_list);
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
- if (start)
- exynos_dsi_transfer_start(dsi);
- return;
- }
-
- list_del_init(&xfer->list);
-
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
-}
-
-static int exynos_dsi_transfer(struct exynos_dsi *dsi,
- struct exynos_dsi_transfer *xfer)
-{
- unsigned long flags;
- bool stopped;
-
- xfer->tx_done = 0;
- xfer->rx_done = 0;
- xfer->result = -ETIMEDOUT;
- init_completion(&xfer->completed);
-
- spin_lock_irqsave(&dsi->transfer_lock, flags);
-
- stopped = list_empty(&dsi->transfer_list);
- list_add_tail(&xfer->list, &dsi->transfer_list);
-
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
-
- if (stopped)
- exynos_dsi_transfer_start(dsi);
-
- wait_for_completion_timeout(&xfer->completed,
- msecs_to_jiffies(DSI_XFER_TIMEOUT_MS));
- if (xfer->result == -ETIMEDOUT) {
- struct mipi_dsi_packet *pkt = &xfer->packet;
- exynos_dsi_remove_transfer(dsi, xfer);
- dev_err(dsi->dev, "xfer timed out: %*ph %*ph\n", 4, pkt->header,
- (int)pkt->payload_length, pkt->payload);
- return -ETIMEDOUT;
- }
-
- /* Also covers hardware timeout condition */
- return xfer->result;
-}
-
-static irqreturn_t exynos_dsi_irq(int irq, void *dev_id)
-{
- struct exynos_dsi *dsi = dev_id;
- u32 status;
-
- status = exynos_dsi_read(dsi, DSIM_INTSRC_REG);
- if (!status) {
- static unsigned long int j;
- if (printk_timed_ratelimit(&j, 500))
- dev_warn(dsi->dev, "spurious interrupt\n");
- return IRQ_HANDLED;
- }
- exynos_dsi_write(dsi, DSIM_INTSRC_REG, status);
-
- if (status & DSIM_INT_SW_RST_RELEASE) {
- u32 mask = ~(DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY |
- DSIM_INT_SFR_HDR_FIFO_EMPTY | DSIM_INT_RX_ECC_ERR |
- DSIM_INT_SW_RST_RELEASE);
- exynos_dsi_write(dsi, DSIM_INTMSK_REG, mask);
- complete(&dsi->completed);
- return IRQ_HANDLED;
- }
-
- if (!(status & (DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY |
- DSIM_INT_PLL_STABLE)))
- return IRQ_HANDLED;
-
- if (exynos_dsi_transfer_finish(dsi))
- exynos_dsi_transfer_start(dsi);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id)
-{
- struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id;
+ struct exynos_dsi *dsi = dsim->priv;
struct drm_encoder *encoder = &dsi->encoder;
- if (dsi->state & DSIM_STATE_VIDOUT_AVAILABLE)
+ if (dsim->state & DSIM_STATE_VIDOUT_AVAILABLE)
exynos_drm_crtc_te_handler(encoder->crtc);
return IRQ_HANDLED;
}
-static void exynos_dsi_enable_irq(struct exynos_dsi *dsi)
-{
- enable_irq(dsi->irq);
-
- if (dsi->te_gpio)
- enable_irq(gpiod_to_irq(dsi->te_gpio));
-}
-
-static void exynos_dsi_disable_irq(struct exynos_dsi *dsi)
-{
- if (dsi->te_gpio)
- disable_irq(gpiod_to_irq(dsi->te_gpio));
-
- disable_irq(dsi->irq);
-}
-
-static int exynos_dsi_init(struct exynos_dsi *dsi)
-{
- const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
-
- exynos_dsi_reset(dsi);
- exynos_dsi_enable_irq(dsi);
-
- if (driver_data->reg_values[RESET_TYPE] == DSIM_FUNCRST)
- exynos_dsi_enable_lane(dsi, BIT(dsi->lanes) - 1);
-
- exynos_dsi_enable_clock(dsi);
- if (driver_data->wait_for_reset)
- exynos_dsi_wait_for_reset(dsi);
- exynos_dsi_set_phy_ctrl(dsi);
- exynos_dsi_init_link(dsi);
-
- return 0;
-}
-
-static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi,
- struct device *panel)
-{
- int ret;
- int te_gpio_irq;
-
- dsi->te_gpio = gpiod_get_optional(panel, "te", GPIOD_IN);
- if (!dsi->te_gpio) {
- return 0;
- } else if (IS_ERR(dsi->te_gpio)) {
- dev_err(dsi->dev, "gpio request failed with %ld\n",
- PTR_ERR(dsi->te_gpio));
- return PTR_ERR(dsi->te_gpio);
- }
-
- te_gpio_irq = gpiod_to_irq(dsi->te_gpio);
-
- ret = request_threaded_irq(te_gpio_irq, exynos_dsi_te_irq_handler, NULL,
- IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN, "TE", dsi);
- if (ret) {
- dev_err(dsi->dev, "request interrupt failed with %d\n", ret);
- gpiod_put(dsi->te_gpio);
- return ret;
- }
-
- return 0;
-}
-
-static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi)
-{
- if (dsi->te_gpio) {
- free_irq(gpiod_to_irq(dsi->te_gpio), dsi);
- gpiod_put(dsi->te_gpio);
- }
-}
-
-static void exynos_dsi_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
-{
- struct exynos_dsi *dsi = bridge_to_dsi(bridge);
- int ret;
-
- if (dsi->state & DSIM_STATE_ENABLED)
- return;
-
- ret = pm_runtime_resume_and_get(dsi->dev);
- if (ret < 0) {
- dev_err(dsi->dev, "failed to enable DSI device.\n");
- return;
- }
-
- dsi->state |= DSIM_STATE_ENABLED;
-}
-
-static void exynos_dsi_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
-{
- struct exynos_dsi *dsi = bridge_to_dsi(bridge);
-
- exynos_dsi_set_display_mode(dsi);
- exynos_dsi_set_display_enable(dsi, true);
-
- dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
-
- return;
-}
-
-static void exynos_dsi_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
-{
- struct exynos_dsi *dsi = bridge_to_dsi(bridge);
-
- if (!(dsi->state & DSIM_STATE_ENABLED))
- return;
-
- dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
-}
-
-static void exynos_dsi_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
-{
- struct exynos_dsi *dsi = bridge_to_dsi(bridge);
-
- exynos_dsi_set_display_enable(dsi, false);
-
- dsi->state &= ~DSIM_STATE_ENABLED;
- pm_runtime_put_sync(dsi->dev);
-}
-
-static void exynos_dsi_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode)
-{
- struct exynos_dsi *dsi = bridge_to_dsi(bridge);
-
- drm_mode_copy(&dsi->mode, adjusted_mode);
-}
-
-static int exynos_dsi_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
-{
- struct exynos_dsi *dsi = bridge_to_dsi(bridge);
-
- return drm_bridge_attach(bridge->encoder, dsi->out_bridge, bridge,
- flags);
-}
-
-static const struct drm_bridge_funcs exynos_dsi_bridge_funcs = {
- .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
- .atomic_reset = drm_atomic_helper_bridge_reset,
- .atomic_pre_enable = exynos_dsi_atomic_pre_enable,
- .atomic_enable = exynos_dsi_atomic_enable,
- .atomic_disable = exynos_dsi_atomic_disable,
- .atomic_post_disable = exynos_dsi_atomic_post_disable,
- .mode_set = exynos_dsi_mode_set,
- .attach = exynos_dsi_attach,
-};
-
-MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
-
-static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
+static int exynos_dsi_host_attach(struct samsung_dsim *dsim,
struct mipi_dsi_device *device)
{
- struct exynos_dsi *dsi = host_to_dsi(host);
- struct device *dev = dsi->dev;
+ struct exynos_dsi *dsi = dsim->priv;
struct drm_encoder *encoder = &dsi->encoder;
struct drm_device *drm = encoder->dev;
- struct drm_panel *panel;
- int ret;
-
- panel = of_drm_find_panel(device->dev.of_node);
- if (!IS_ERR(panel)) {
- dsi->out_bridge = devm_drm_panel_bridge_add(dev, panel);
- } else {
- dsi->out_bridge = of_drm_find_bridge(device->dev.of_node);
- if (!dsi->out_bridge)
- dsi->out_bridge = ERR_PTR(-EINVAL);
- }
-
- if (IS_ERR(dsi->out_bridge)) {
- ret = PTR_ERR(dsi->out_bridge);
- DRM_DEV_ERROR(dev, "failed to find the bridge: %d\n", ret);
- return ret;
- }
-
- DRM_DEV_INFO(dev, "Attached %s device\n", device->name);
- drm_bridge_add(&dsi->bridge);
-
- drm_bridge_attach(encoder, &dsi->bridge,
+ drm_bridge_attach(encoder, &dsim->bridge,
list_first_entry_or_null(&encoder->bridge_chain,
struct drm_bridge,
chain_node), 0);
- /*
- * This is a temporary solution and should be made by more generic way.
- *
- * If attached panel device is for command mode one, dsi should register
- * TE interrupt handler.
- */
- if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO)) {
- ret = exynos_dsi_register_te_irq(dsi, &device->dev);
- if (ret)
- return ret;
- }
-
mutex_lock(&drm->mode_config.mutex);
- dsi->lanes = device->lanes;
- dsi->format = device->format;
- dsi->mode_flags = device->mode_flags;
+ dsim->lanes = device->lanes;
+ dsim->format = device->format;
+ dsim->mode_flags = device->mode_flags;
exynos_drm_crtc_get_by_type(drm, EXYNOS_DISPLAY_TYPE_LCD)->i80_mode =
- !(dsi->mode_flags & MIPI_DSI_MODE_VIDEO);
+ !(dsim->mode_flags & MIPI_DSI_MODE_VIDEO);
mutex_unlock(&drm->mode_config.mutex);
@@ -1525,100 +60,20 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
return 0;
}
-static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
- struct mipi_dsi_device *device)
+static void exynos_dsi_host_detach(struct samsung_dsim *dsim,
+ struct mipi_dsi_device *device)
{
- struct exynos_dsi *dsi = host_to_dsi(host);
+ struct exynos_dsi *dsi = dsim->priv;
struct drm_device *drm = dsi->encoder.dev;
- if (dsi->out_bridge->funcs->detach)
- dsi->out_bridge->funcs->detach(dsi->out_bridge);
- dsi->out_bridge = NULL;
-
if (drm->mode_config.poll_enabled)
drm_kms_helper_hotplug_event(drm);
-
- exynos_dsi_unregister_te_irq(dsi);
-
- drm_bridge_remove(&dsi->bridge);
-
- return 0;
}
-static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
- const struct mipi_dsi_msg *msg)
+static int exynos_dsi_bind(struct device *dev, struct device *master, void *data)
{
- struct exynos_dsi *dsi = host_to_dsi(host);
- struct exynos_dsi_transfer xfer;
- int ret;
-
- if (!(dsi->state & DSIM_STATE_ENABLED))
- return -EINVAL;
-
- if (!(dsi->state & DSIM_STATE_INITIALIZED)) {
- ret = exynos_dsi_init(dsi);
- if (ret)
- return ret;
- dsi->state |= DSIM_STATE_INITIALIZED;
- }
-
- ret = mipi_dsi_create_packet(&xfer.packet, msg);
- if (ret < 0)
- return ret;
-
- xfer.rx_len = msg->rx_len;
- xfer.rx_payload = msg->rx_buf;
- xfer.flags = msg->flags;
-
- ret = exynos_dsi_transfer(dsi, &xfer);
- return (ret < 0) ? ret : xfer.rx_done;
-}
-
-static const struct mipi_dsi_host_ops exynos_dsi_ops = {
- .attach = exynos_dsi_host_attach,
- .detach = exynos_dsi_host_detach,
- .transfer = exynos_dsi_host_transfer,
-};
-
-static int exynos_dsi_of_read_u32(const struct device_node *np,
- const char *propname, u32 *out_value)
-{
- int ret = of_property_read_u32(np, propname, out_value);
-
- if (ret < 0)
- pr_err("%pOF: failed to get '%s' property\n", np, propname);
-
- return ret;
-}
-
-static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
-{
- struct device *dev = dsi->dev;
- struct device_node *node = dev->of_node;
- int ret;
-
- ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency",
- &dsi->pll_clk_rate);
- if (ret < 0)
- return ret;
-
- ret = exynos_dsi_of_read_u32(node, "samsung,burst-clock-frequency",
- &dsi->burst_clk_rate);
- if (ret < 0)
- return ret;
-
- ret = exynos_dsi_of_read_u32(node, "samsung,esc-clock-frequency",
- &dsi->esc_clk_rate);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int exynos_dsi_bind(struct device *dev, struct device *master,
- void *data)
-{
- struct exynos_dsi *dsi = dev_get_drvdata(dev);
+ struct samsung_dsim *dsim = dev_get_drvdata(dev);
+ struct exynos_dsi *dsi = dsim->priv;
struct drm_encoder *encoder = &dsi->encoder;
struct drm_device *drm_dev = data;
int ret;
@@ -1629,17 +84,16 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
if (ret < 0)
return ret;
- return mipi_dsi_host_register(&dsi->dsi_host);
+ return mipi_dsi_host_register(&dsim->dsi_host);
}
-static void exynos_dsi_unbind(struct device *dev, struct device *master,
- void *data)
+static void exynos_dsi_unbind(struct device *dev, struct device *master, void *data)
{
- struct exynos_dsi *dsi = dev_get_drvdata(dev);
+ struct samsung_dsim *dsim = dev_get_drvdata(dev);
- exynos_dsi_atomic_disable(&dsi->bridge, NULL);
+ dsim->bridge.funcs->atomic_disable(&dsim->bridge, NULL);
- mipi_dsi_host_unregister(&dsi->dsi_host);
+ mipi_dsi_host_unregister(&dsim->dsi_host);
}
static const struct component_ops exynos_dsi_component_ops = {
@@ -1647,189 +101,90 @@ static const struct component_ops exynos_dsi_component_ops = {
.unbind = exynos_dsi_unbind,
};
-static int exynos_dsi_probe(struct platform_device *pdev)
+static int exynos_dsi_register_host(struct samsung_dsim *dsim)
{
- struct device *dev = &pdev->dev;
struct exynos_dsi *dsi;
- int ret, i;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ dsi = devm_kzalloc(dsim->dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi)
return -ENOMEM;
- init_completion(&dsi->completed);
- spin_lock_init(&dsi->transfer_lock);
- INIT_LIST_HEAD(&dsi->transfer_list);
-
- dsi->dsi_host.ops = &exynos_dsi_ops;
- dsi->dsi_host.dev = dev;
+ dsim->priv = dsi;
+ dsim->bridge.pre_enable_prev_first = true;
- dsi->dev = dev;
- dsi->driver_data = of_device_get_match_data(dev);
-
- dsi->supplies[0].supply = "vddcore";
- dsi->supplies[1].supply = "vddio";
- ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
- dsi->supplies);
- if (ret)
- return dev_err_probe(dev, ret, "failed to get regulators\n");
-
- dsi->clks = devm_kcalloc(dev,
- dsi->driver_data->num_clks, sizeof(*dsi->clks),
- GFP_KERNEL);
- if (!dsi->clks)
- return -ENOMEM;
-
- for (i = 0; i < dsi->driver_data->num_clks; i++) {
- dsi->clks[i] = devm_clk_get(dev, clk_names[i]);
- if (IS_ERR(dsi->clks[i])) {
- if (strcmp(clk_names[i], "sclk_mipi") == 0) {
- dsi->clks[i] = devm_clk_get(dev,
- OLD_SCLK_MIPI_CLK_NAME);
- if (!IS_ERR(dsi->clks[i]))
- continue;
- }
-
- dev_info(dev, "failed to get the clock: %s\n",
- clk_names[i]);
- return PTR_ERR(dsi->clks[i]);
- }
- }
-
- dsi->reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(dsi->reg_base))
- return PTR_ERR(dsi->reg_base);
-
- dsi->phy = devm_phy_get(dev, "dsim");
- if (IS_ERR(dsi->phy)) {
- dev_info(dev, "failed to get dsim phy\n");
- return PTR_ERR(dsi->phy);
- }
-
- dsi->irq = platform_get_irq(pdev, 0);
- if (dsi->irq < 0)
- return dsi->irq;
-
- ret = devm_request_threaded_irq(dev, dsi->irq, NULL,
- exynos_dsi_irq,
- IRQF_ONESHOT | IRQF_NO_AUTOEN,
- dev_name(dev), dsi);
- if (ret) {
- dev_err(dev, "failed to request dsi irq\n");
- return ret;
- }
-
- ret = exynos_dsi_parse_dt(dsi);
- if (ret)
- return ret;
-
- platform_set_drvdata(pdev, dsi);
-
- pm_runtime_enable(dev);
-
- dsi->bridge.funcs = &exynos_dsi_bridge_funcs;
- dsi->bridge.of_node = dev->of_node;
- dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
- dsi->bridge.pre_enable_prev_first = true;
-
- ret = component_add(dev, &exynos_dsi_component_ops);
- if (ret)
- goto err_disable_runtime;
-
- return 0;
-
-err_disable_runtime:
- pm_runtime_disable(dev);
-
- return ret;
+ return component_add(dsim->dev, &exynos_dsi_component_ops);
}
-static int exynos_dsi_remove(struct platform_device *pdev)
+static void exynos_dsi_unregister_host(struct samsung_dsim *dsim)
{
- pm_runtime_disable(&pdev->dev);
-
- component_del(&pdev->dev, &exynos_dsi_component_ops);
-
- return 0;
-}
-
-static int __maybe_unused exynos_dsi_suspend(struct device *dev)
-{
- struct exynos_dsi *dsi = dev_get_drvdata(dev);
- const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
- int ret, i;
-
- usleep_range(10000, 20000);
-
- if (dsi->state & DSIM_STATE_INITIALIZED) {
- dsi->state &= ~DSIM_STATE_INITIALIZED;
-
- exynos_dsi_disable_clock(dsi);
-
- exynos_dsi_disable_irq(dsi);
- }
-
- dsi->state &= ~DSIM_STATE_CMD_LPM;
-
- phy_power_off(dsi->phy);
-
- for (i = driver_data->num_clks - 1; i > -1; i--)
- clk_disable_unprepare(dsi->clks[i]);
-
- ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
- if (ret < 0)
- dev_err(dsi->dev, "cannot disable regulators %d\n", ret);
-
- return 0;
+ component_del(dsim->dev, &exynos_dsi_component_ops);
}
-static int __maybe_unused exynos_dsi_resume(struct device *dev)
-{
- struct exynos_dsi *dsi = dev_get_drvdata(dev);
- const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
- int ret, i;
-
- ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
- if (ret < 0) {
- dev_err(dsi->dev, "cannot enable regulators %d\n", ret);
- return ret;
- }
+static const struct samsung_dsim_host_ops exynos_dsi_exynos_host_ops = {
+ .register_host = exynos_dsi_register_host,
+ .unregister_host = exynos_dsi_unregister_host,
+ .attach = exynos_dsi_host_attach,
+ .detach = exynos_dsi_host_detach,
+ .te_irq_handler = exynos_dsi_te_irq_handler,
+};
- for (i = 0; i < driver_data->num_clks; i++) {
- ret = clk_prepare_enable(dsi->clks[i]);
- if (ret < 0)
- goto err_clk;
- }
+static const struct samsung_dsim_plat_data exynos3250_dsi_pdata = {
+ .hw_type = DSIM_TYPE_EXYNOS3250,
+ .host_ops = &exynos_dsi_exynos_host_ops,
+};
- ret = phy_power_on(dsi->phy);
- if (ret < 0) {
- dev_err(dsi->dev, "cannot enable phy %d\n", ret);
- goto err_clk;
- }
+static const struct samsung_dsim_plat_data exynos4210_dsi_pdata = {
+ .hw_type = DSIM_TYPE_EXYNOS4210,
+ .host_ops = &exynos_dsi_exynos_host_ops,
+};
- return 0;
+static const struct samsung_dsim_plat_data exynos5410_dsi_pdata = {
+ .hw_type = DSIM_TYPE_EXYNOS5410,
+ .host_ops = &exynos_dsi_exynos_host_ops,
+};
-err_clk:
- while (--i > -1)
- clk_disable_unprepare(dsi->clks[i]);
- regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+static const struct samsung_dsim_plat_data exynos5422_dsi_pdata = {
+ .hw_type = DSIM_TYPE_EXYNOS5422,
+ .host_ops = &exynos_dsi_exynos_host_ops,
+};
- return ret;
-}
+static const struct samsung_dsim_plat_data exynos5433_dsi_pdata = {
+ .hw_type = DSIM_TYPE_EXYNOS5433,
+ .host_ops = &exynos_dsi_exynos_host_ops,
+};
-static const struct dev_pm_ops exynos_dsi_pm_ops = {
- SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+static const struct of_device_id exynos_dsi_of_match[] = {
+ {
+ .compatible = "samsung,exynos3250-mipi-dsi",
+ .data = &exynos3250_dsi_pdata,
+ },
+ {
+ .compatible = "samsung,exynos4210-mipi-dsi",
+ .data = &exynos4210_dsi_pdata,
+ },
+ {
+ .compatible = "samsung,exynos5410-mipi-dsi",
+ .data = &exynos5410_dsi_pdata,
+ },
+ {
+ .compatible = "samsung,exynos5422-mipi-dsi",
+ .data = &exynos5422_dsi_pdata,
+ },
+ {
+ .compatible = "samsung,exynos5433-mipi-dsi",
+ .data = &exynos5433_dsi_pdata,
+ },
+ { /* sentinel. */ }
};
+MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
struct platform_driver dsi_driver = {
- .probe = exynos_dsi_probe,
- .remove = exynos_dsi_remove,
+ .probe = samsung_dsim_probe,
+ .remove = samsung_dsim_remove,
.driver = {
.name = "exynos-dsi",
.owner = THIS_MODULE,
- .pm = &exynos_dsi_pm_ops,
+ .pm = &samsung_dsim_pm_ops,
.of_match_table = exynos_dsi_of_match,
},
};
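To make the shape of this conversion concrete, a minimal sketch of what another SoC glue layer built on the shared samsung-dsim bridge could look like, using only the samsung_dsim_plat_data and samsung_dsim_host_ops fields visible in this patch; the my_soc_* names are hypothetical and the hw_type value is reused purely as a placeholder.

#include <drm/bridge/samsung-dsim.h>

static int my_soc_dsi_register_host(struct samsung_dsim *dsim)
{
	/* Hook dsim->bridge into the SoC's encoder/component setup here. */
	return 0;
}

static void my_soc_dsi_unregister_host(struct samsung_dsim *dsim)
{
}

static const struct samsung_dsim_host_ops my_soc_dsi_host_ops = {
	.register_host = my_soc_dsi_register_host,
	.unregister_host = my_soc_dsi_unregister_host,
};

static const struct samsung_dsim_plat_data my_soc_dsi_pdata = {
	.hw_type = DSIM_TYPE_EXYNOS5433,	/* placeholder */
	.host_ops = &my_soc_dsi_host_ops,
};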
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 918470a04591..057ef22fa9c6 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -195,6 +195,7 @@ i915-y += \
i915-y += \
gt/uc/intel_gsc_fw.o \
gt/uc/intel_gsc_uc.o \
+ gt/uc/intel_gsc_uc_heci_cmd_submit.o\
gt/uc/intel_guc.o \
gt/uc/intel_guc_ads.o \
gt/uc/intel_guc_capture.o \
@@ -239,6 +240,7 @@ i915-y += \
display/intel_display_power.o \
display/intel_display_power_map.o \
display/intel_display_power_well.o \
+ display/intel_display_rps.o \
display/intel_dmc.o \
display/intel_dpio_phy.o \
display/intel_dpll.o \
@@ -254,6 +256,7 @@ i915-y += \
display/intel_frontbuffer.o \
display/intel_global_state.o \
display/intel_hdcp.o \
+ display/intel_hdcp_gsc.o \
display/intel_hotplug.o \
display/intel_hti.o \
display/intel_lpe_audio.o \
@@ -266,10 +269,13 @@ i915-y += \
display/intel_psr.o \
display/intel_quirks.o \
display/intel_sprite.o \
+ display/intel_sprite_uapi.o \
display/intel_tc.o \
display/intel_vblank.o \
display/intel_vga.o \
+ display/intel_wm.o \
display/i9xx_plane.o \
+ display/i9xx_wm.o \
display/skl_scaler.o \
display/skl_universal_plane.o \
display/skl_watermark.o
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index fa754038d669..920d570f7594 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -17,6 +17,7 @@
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dp.h"
+#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
@@ -136,16 +137,12 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
- u32 trans_dp;
-
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
- trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
- if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
- trans_dp |= TRANS_DP_ENH_FRAMING;
- else
- trans_dp &= ~TRANS_DP_ENH_FRAMING;
- intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
+ intel_de_rmw(dev_priv, TRANS_DP_CTL(crtc->pipe),
+ TRANS_DP_ENH_FRAMING,
+ drm_dp_enhanced_frame_cap(intel_dp->dpcd) ?
+ TRANS_DP_ENH_FRAMING : 0);
} else {
if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
intel_dp->DP |= DP_COLOR_RANGE_16_235;
@@ -1200,29 +1197,6 @@ static bool g4x_digital_port_connected(struct intel_encoder *encoder)
return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}
-static bool gm45_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit;
-
- switch (encoder->hpd_pin) {
- case HPD_PORT_B:
- bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
- break;
- case HPD_PORT_C:
- bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
- break;
- case HPD_PORT_D:
- bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
- break;
- default:
- MISSING_CASE(encoder->hpd_pin);
- return false;
- }
-
- return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
-}
-
static bool ilk_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -1279,11 +1253,19 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
bool g4x_dp_init(struct drm_i915_private *dev_priv,
i915_reg_t output_reg, enum port port)
{
+ const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
+ devdata = intel_bios_encoder_data_lookup(dev_priv, port);
+
+ /* FIXME bail? */
+ if (!devdata)
+ drm_dbg_kms(&dev_priv->drm, "No VBT child device for DP-%c\n",
+ port_name(port));
+
dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
if (!dig_port)
return false;
@@ -1295,6 +1277,8 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
intel_encoder = &dig_port->base;
encoder = &intel_encoder->base;
+ intel_encoder->devdata = devdata;
+
mutex_init(&dig_port->hdcp_mutex);
if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
@@ -1377,10 +1361,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
dig_port->hpd_pulse = intel_dp_hpd_pulse;
if (HAS_GMCH(dev_priv)) {
- if (IS_GM45(dev_priv))
- dig_port->connected = gm45_digital_port_connected;
- else
- dig_port->connected = g4x_digital_port_connected;
+ dig_port->connected = g4x_digital_port_connected;
} else {
if (port == PORT_A)
dig_port->connected = ilk_digital_port_connected;
@@ -1391,7 +1372,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
if (port != PORT_A)
intel_infoframe_init(dig_port);
- dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ dig_port->aux_ch = intel_dp_aux_ch(intel_encoder);
if (!intel_dp_init_connector(dig_port, intel_connector))
goto err_init_connector;
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 64c3b3990702..448ea26786e0 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -13,6 +13,7 @@
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
+#include "intel_dp_aux.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdmi.h"
@@ -273,8 +274,8 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state,
*/
if (pipe_config->pipe_bpp > 24) {
- intel_de_write(dev_priv, TRANS_CHICKEN1(pipe),
- intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) | TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+ intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe),
+ 0, TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
temp &= ~SDVO_COLOR_FORMAT_MASK;
temp |= SDVO_COLOR_FORMAT_8bpc;
@@ -290,8 +291,8 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state,
intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
- intel_de_write(dev_priv, TRANS_CHICKEN1(pipe),
- intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) & ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+ intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe),
+ TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE, 0);
}
drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
@@ -548,10 +549,18 @@ intel_hdmi_hotplug(struct intel_encoder *encoder,
void g4x_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port)
{
+ const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
+ devdata = intel_bios_encoder_data_lookup(dev_priv, port);
+
+ /* FIXME bail? */
+ if (!devdata)
+ drm_dbg_kms(&dev_priv->drm, "No VBT child device for HDMI-%c\n",
+ port_name(port));
+
dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
if (!dig_port)
return;
@@ -564,6 +573,8 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
intel_encoder = &dig_port->base;
+ intel_encoder->devdata = devdata;
+
mutex_init(&dig_port->hdcp_mutex);
drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
@@ -629,6 +640,6 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
intel_infoframe_init(dig_port);
- dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ dig_port->aux_ch = intel_dp_aux_ch(intel_encoder);
intel_hdmi_init_connector(dig_port, intel_connector);
}
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 83aa3800245f..2910f5d0f3e2 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -267,3 +267,40 @@ void hsw_ips_get_config(struct intel_crtc_state *crtc_state)
crtc_state->ips_enabled = true;
}
}
+
+static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = m->private;
+ intel_wakeref_t wakeref;
+
+ if (!HAS_IPS(i915))
+ return -ENODEV;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ seq_printf(m, "Enabled by kernel parameter: %s\n",
+ str_yes_no(i915->params.enable_ips));
+
+ if (DISPLAY_VER(i915) >= 8) {
+ seq_puts(m, "Currently: unknown\n");
+ } else {
+ if (intel_de_read(i915, IPS_CTL) & IPS_ENABLE)
+ seq_puts(m, "Currently: enabled\n");
+ else
+ seq_puts(m, "Currently: disabled\n");
+ }
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hsw_ips_debugfs_status);
+
+void hsw_ips_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ debugfs_create_file("i915_ips_status", 0444, minor->debugfs_root,
+ i915, &hsw_ips_debugfs_status_fops);
+}
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.h b/drivers/gpu/drm/i915/display/hsw_ips.h
index 4564dee497d7..7ed6061874f7 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.h
+++ b/drivers/gpu/drm/i915/display/hsw_ips.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
+struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@@ -22,5 +23,6 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
int hsw_ips_compute_config(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void hsw_ips_get_config(struct intel_crtc_state *crtc_state);
+void hsw_ips_debugfs_register(struct drm_i915_private *i915);
#endif /* __HSW_IPS_H__ */
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
new file mode 100644
index 000000000000..caef72d38798
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -0,0 +1,4047 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i9xx_wm.h"
+#include "intel_atomic.h"
+#include "intel_display.h"
+#include "intel_display_trace.h"
+#include "intel_mchbar_regs.h"
+#include "intel_wm.h"
+#include "skl_watermark.h"
+#include "vlv_sideband.h"
+
+/* used in computing the new watermarks state */
+struct intel_wm_config {
+ unsigned int num_pipes_active;
+ bool sprites_enabled;
+ bool sprites_scaled;
+};
+
+struct cxsr_latency {
+ bool is_desktop : 1;
+ bool is_ddr3 : 1;
+ u16 fsb_freq;
+ u16 mem_freq;
+ u16 display_sr;
+ u16 display_hpll_disable;
+ u16 cursor_sr;
+ u16 cursor_hpll_disable;
+};
+
+static const struct cxsr_latency cxsr_latency_table[] = {
+ {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
+ {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
+ {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
+ {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
+ {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
+
+ {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
+ {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
+ {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
+ {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
+ {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
+
+ {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
+ {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
+ {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
+ {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
+ {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
+
+ {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
+ {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
+ {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
+ {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
+ {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
+
+ {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
+ {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
+ {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
+ {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
+ {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
+
+ {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
+ {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
+ {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
+ {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
+ {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
+};
+
+static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
+ bool is_ddr3,
+ int fsb,
+ int mem)
+{
+ const struct cxsr_latency *latency;
+ int i;
+
+ if (fsb == 0 || mem == 0)
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
+ latency = &cxsr_latency_table[i];
+ if (is_desktop == latency->is_desktop &&
+ is_ddr3 == latency->is_ddr3 &&
+ fsb == latency->fsb_freq && mem == latency->mem_freq)
+ return latency;
+ }
+
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+
+ return NULL;
+}
+
+static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 val;
+
+ vlv_punit_get(dev_priv);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ if (enable)
+ val &= ~FORCE_DDR_HIGH_FREQ;
+ else
+ val |= FORCE_DDR_HIGH_FREQ;
+ val &= ~FORCE_DDR_LOW_FREQ;
+ val |= FORCE_DDR_FREQ_REQ_ACK;
+ vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+ FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
+ drm_err(&dev_priv->drm,
+ "timed out waiting for Punit DDR DVFS request\n");
+
+ vlv_punit_put(dev_priv);
+}
+
+static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 val;
+
+ vlv_punit_get(dev_priv);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ if (enable)
+ val |= DSP_MAXFIFO_PM5_ENABLE;
+ else
+ val &= ~DSP_MAXFIFO_PM5_ENABLE;
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
+
+ vlv_punit_put(dev_priv);
+}
+
+#define FW_WM(value, plane) \
+ (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
+
+static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+{
+ bool was_enabled;
+ u32 val;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+ intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
+ } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
+ was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+ intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
+ } else if (IS_PINEVIEW(dev_priv)) {
+ val = intel_uncore_read(&dev_priv->uncore, DSPFW3);
+ was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
+ if (enable)
+ val |= PINEVIEW_SELF_REFRESH_EN;
+ else
+ val &= ~PINEVIEW_SELF_REFRESH_EN;
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, val);
+ intel_uncore_posting_read(&dev_priv->uncore, DSPFW3);
+ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
+ was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
+ _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
+ intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
+ } else if (IS_I915GM(dev_priv)) {
+ /*
+ * FIXME can't find a bit like this for 915G, and
+ * yet it does have the related watermark in
+ * FW_BLC_SELF. What's going on?
+ */
+ was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
+ val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
+ _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
+ intel_uncore_write(&dev_priv->uncore, INSTPM, val);
+ intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
+ } else {
+ return false;
+ }
+
+ trace_intel_memory_cxsr(dev_priv, was_enabled, enable);
+
+ drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
+ str_enabled_disabled(enable),
+ str_enabled_disabled(was_enabled));
+
+ return was_enabled;
+}
+
+/**
+ * intel_set_memory_cxsr - Configure CxSR state
+ * @dev_priv: i915 device
+ * @enable: Allow vs. disallow CxSR
+ *
+ * Allow or disallow the system to enter a special CxSR
+ * (C-state self refresh) state. What typically happens in CxSR mode
+ * is that several display FIFOs may get combined into a single larger
+ * FIFO for a particular plane (so called max FIFO mode) to allow the
+ * system to defer memory fetches longer, and the memory will enter
+ * self refresh.
+ *
+ * Note that enabling CxSR does not guarantee that the system will enter
+ * this special mode, nor does it guarantee that the system stays
+ * in that mode once entered. So this just allows/disallows the system
+ * to autonomously utilize the CxSR mode. Other factors such as core
+ * C-states will affect when/if the system actually enters/exits the
+ * CxSR mode.
+ *
+ * Note that on VLV/CHV this actually only controls the max FIFO mode,
+ * and the system is free to enter/exit memory self refresh at any time
+ * even when the use of CxSR has been disallowed.
+ *
+ * While the system is actually in the CxSR/max FIFO mode, some plane
+ * control registers will not get latched on vblank. Thus in order to
+ * guarantee the system will respond to changes in the plane registers
+ * we must always disallow CxSR prior to making changes to those registers.
+ * Unfortunately the system will re-evaluate the CxSR conditions at
+ * frame start which happens after vblank start (which is when the plane
+ * registers would get latched), so we can't proceed with the plane update
+ * during the same frame where we disallowed CxSR.
+ *
+ * Certain platforms also have a deeper HPLL SR mode. Fortunately the
+ * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
+ * the hardware w.r.t. HPLL SR when writing to plane registers.
+ * Disallowing just CxSR is sufficient.
+ */
+bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+{
+ bool ret;
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+ ret = _intel_set_memory_cxsr(dev_priv, enable);
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ dev_priv->display.wm.vlv.cxsr = enable;
+ else if (IS_G4X(dev_priv))
+ dev_priv->display.wm.g4x.cxsr = enable;
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+
+ return ret;
+}
+
+/*
+ * Latency for FIFO fetches is dependent on several factors:
+ * - memory configuration (speed, channels)
+ * - chipset
+ * - current MCH state
+ * It can be fairly high in some situations, so here we assume a fairly
+ * pessimal value. It's a tradeoff between extra memory fetches (if we
+ * set this value too high, the FIFO will fetch frequently to stay full)
+ * and power consumption (set it too low to save power and we might see
+ * FIFO underruns and display "flicker").
+ *
+ * A value of 5us seems to be a good balance; safe for very low end
+ * platforms but not overly aggressive on lower latency configs.
+ */
+static const int pessimal_latency_ns = 5000;
+
+#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
+ ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
+
+static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
+ enum pipe pipe = crtc->pipe;
+ int sprite0_start, sprite1_start;
+ u32 dsparb, dsparb2, dsparb3;
+
+ switch (pipe) {
+ case PIPE_A:
+ dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
+ sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
+ break;
+ case PIPE_B:
+ dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
+ sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
+ break;
+ case PIPE_C:
+ dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
+ sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
+ sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
+ break;
+ default:
+ MISSING_CASE(pipe);
+ return;
+ }
+
+ fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
+ fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
+ fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
+ fifo_state->plane[PLANE_CURSOR] = 63;
+}
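For reference, the packing decoded by VLV_FIFO_START() above splits each 9-bit FIFO start address across two registers: the low 8 bits live in DSPARB (or DSPARB3 for pipe C) and the 9th bit in DSPARB2. A hedged worked example with arbitrary register values: on pipe A, DSPARB = 0xffaa and DSPARB2 = 0 give sprite0_start = 0xaa = 170 and sprite1_start = 0xff = 255, so the primary plane owns 170 cachelines, sprite0 owns 255 - 170 = 85, sprite1 owns 511 - 255 = 256, and the cursor keeps its fixed 63.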
+
+static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
+{
+ u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ if (i9xx_plane == PLANE_B)
+ size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
+
+ drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
+
+ return size;
+}
+
+static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
+{
+ u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ int size;
+
+ size = dsparb & 0x1ff;
+ if (i9xx_plane == PLANE_B)
+ size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
+ size >>= 1; /* Convert to cachelines */
+
+ drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
+
+ return size;
+}
+
+static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
+{
+ u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 2; /* Convert to cachelines */
+
+ drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
+
+ return size;
+}
+
+/* Pineview has different values for various configs */
+static const struct intel_watermark_params pnv_display_wm = {
+ .fifo_size = PINEVIEW_DISPLAY_FIFO,
+ .max_wm = PINEVIEW_MAX_WM,
+ .default_wm = PINEVIEW_DFT_WM,
+ .guard_size = PINEVIEW_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params pnv_display_hplloff_wm = {
+ .fifo_size = PINEVIEW_DISPLAY_FIFO,
+ .max_wm = PINEVIEW_MAX_WM,
+ .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
+ .guard_size = PINEVIEW_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params pnv_cursor_wm = {
+ .fifo_size = PINEVIEW_CURSOR_FIFO,
+ .max_wm = PINEVIEW_CURSOR_MAX_WM,
+ .default_wm = PINEVIEW_CURSOR_DFT_WM,
+ .guard_size = PINEVIEW_CURSOR_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
+ .fifo_size = PINEVIEW_CURSOR_FIFO,
+ .max_wm = PINEVIEW_CURSOR_MAX_WM,
+ .default_wm = PINEVIEW_CURSOR_DFT_WM,
+ .guard_size = PINEVIEW_CURSOR_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i965_cursor_wm_info = {
+ .fifo_size = I965_CURSOR_FIFO,
+ .max_wm = I965_CURSOR_MAX_WM,
+ .default_wm = I965_CURSOR_DFT_WM,
+ .guard_size = 2,
+ .cacheline_size = I915_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i945_wm_info = {
+ .fifo_size = I945_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I915_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i915_wm_info = {
+ .fifo_size = I915_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I915_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i830_a_wm_info = {
+ .fifo_size = I855GM_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I830_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i830_bc_wm_info = {
+ .fifo_size = I855GM_FIFO_SIZE,
+ .max_wm = I915_MAX_WM / 2,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I830_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i845_wm_info = {
+ .fifo_size = I830_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I830_FIFO_LINE_SIZE,
+};
+
+/**
+ * intel_wm_method1 - Method 1 / "small buffer" watermark formula
+ * @pixel_rate: Pipe pixel rate in kHz
+ * @cpp: Plane bytes per pixel
+ * @latency: Memory wakeup latency in 0.1us units
+ *
+ * Compute the watermark using the method 1 or "small buffer"
+ * formula. The caller may additionally add extra cachelines
+ * to account for TLB misses and clock crossings.
+ *
+ * This method is concerned with the short term drain rate
+ * of the FIFO, ie. it does not account for blanking periods
+ * which would effectively reduce the average drain rate across
+ * a longer period. The name "small" refers to the fact that the
+ * FIFO is relatively small compared to the amount of data
+ * fetched.
+ *
+ * The FIFO level vs. time graph might look something like:
+ *
+ * |\ |\
+ * | \ | \
+ * __---__---__ (- plane active, _ blanking)
+ * -> time
+ *
+ * or perhaps like this:
+ *
+ * |\|\ |\|\
+ * __----__----__ (- plane active, _ blanking)
+ * -> time
+ *
+ * Returns:
+ * The watermark in bytes
+ */
+static unsigned int intel_wm_method1(unsigned int pixel_rate,
+ unsigned int cpp,
+ unsigned int latency)
+{
+ u64 ret;
+
+ ret = mul_u32_u32(pixel_rate, cpp * latency);
+ ret = DIV_ROUND_UP_ULL(ret, 10000);
+
+ return ret;
+}
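As a units check on the divide-by-10000 above (a standalone sketch only, not driver code; the driver uses mul_u32_u32()/DIV_ROUND_UP_ULL for the same arithmetic): pixel rate is in kHz (10^3 pixels/s) and latency is in 0.1 us steps (10^-7 s), so their product with cpp comes out in units of 10^-4 bytes.

/* Illustrative, self-contained version of the method 1 arithmetic. */
#include <stdint.h>
#include <stdio.h>

static unsigned int wm_method1(unsigned int pixel_rate_khz,
			       unsigned int cpp,
			       unsigned int latency_01us)
{
	/* kHz * bytes/pixel * 0.1us => result is in 10^-4 bytes */
	uint64_t bytes = (uint64_t)pixel_rate_khz * cpp * latency_01us;

	return (unsigned int)((bytes + 10000 - 1) / 10000); /* round up to bytes */
}

int main(void)
{
	/* e.g. 1080p60 (148500 kHz), 4 bytes/pixel, 5 us = 50 x 0.1 us */
	printf("method1 wm: %u bytes\n", wm_method1(148500, 4, 50)); /* 2970 */
	return 0;
}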
+
+/**
+ * intel_wm_method2 - Method 2 / "large buffer" watermark formula
+ * @pixel_rate: Pipe pixel rate in kHz
+ * @htotal: Pipe horizontal total
+ * @width: Plane width in pixels
+ * @cpp: Plane bytes per pixel
+ * @latency: Memory wakeup latency in 0.1us units
+ *
+ * Compute the watermark using the method 2 or "large buffer"
+ * formula. The caller may additionally add extra cachelines
+ * to account for TLB misses and clock crossings.
+ *
+ * This method is concerned with the long term drain rate
+ * of the FIFO, ie. it does account for blanking periods
+ * which effectively reduce the average drain rate across
+ * a longer period. The name "large" refers to the fact that the
+ * FIFO is relatively large compared to the amount of data
+ * fetched.
+ *
+ * The FIFO level vs. time graph might look something like:
+ *
+ * |\___ |\___
+ * | \___ | \___
+ * | \ | \
+ * __ --__--__--__--__--__--__ (- plane active, _ blanking)
+ * -> time
+ *
+ * Returns:
+ * The watermark in bytes
+ */
+static unsigned int intel_wm_method2(unsigned int pixel_rate,
+ unsigned int htotal,
+ unsigned int width,
+ unsigned int cpp,
+ unsigned int latency)
+{
+ unsigned int ret;
+
+ /*
+ * FIXME remove once all users are computing
+ * watermarks in the correct place.
+ */
+ if (WARN_ON_ONCE(htotal == 0))
+ htotal = 1;
+
+ ret = (latency * pixel_rate) / (htotal * 10000);
+ ret = (ret + 1) * width * cpp;
+
+ return ret;
+}
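Method 2, in other words, counts how many full scanlines elapse during the latency window and charges one plane line of bytes per scanline, plus one. A hedged standalone sketch of the same arithmetic (not driver code; example numbers are invented):

/* Illustrative, self-contained version of the method 2 arithmetic. */
#include <stdio.h>

static unsigned int wm_method2(unsigned int pixel_rate_khz,
			       unsigned int htotal,
			       unsigned int width,
			       unsigned int cpp,
			       unsigned int latency_01us)
{
	/* full scanlines elapsed during the latency window */
	unsigned int lines = (latency_01us * pixel_rate_khz) / (htotal * 10000);

	/* one extra line, times the bytes fetched per plane line */
	return (lines + 1) * width * cpp;
}

int main(void)
{
	/* e.g. 1080p60: 148500 kHz, htotal 2200, 1920 px wide at 4 bytes/px, 12 us */
	printf("method2 wm: %u bytes\n", wm_method2(148500, 2200, 1920, 4, 120)); /* 7680 */
	return 0;
}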
+
+/**
+ * intel_calculate_wm - calculate watermark level
+ * @pixel_rate: pixel clock
+ * @wm: chip FIFO params
+ * @fifo_size: size of the FIFO buffer
+ * @cpp: bytes per pixel
+ * @latency_ns: memory latency for the platform
+ *
+ * Calculate the watermark level (the level at which the display plane will
+ * start fetching from memory again). Each chip has a different display
+ * FIFO size and allocation, so the caller needs to figure that out and pass
+ * in the correct intel_watermark_params structure.
+ *
+ * As the pixel clock runs, the FIFO will be drained at a rate that depends
+ * on the pixel size. When it reaches the watermark level, it'll start
+ * fetching FIFO line sized chunks from memory until the FIFO fills
+ * past the watermark point. If the FIFO drains completely, a FIFO underrun
+ * will occur, and a display engine hang could result.
+ */
+static unsigned int intel_calculate_wm(int pixel_rate,
+ const struct intel_watermark_params *wm,
+ int fifo_size, int cpp,
+ unsigned int latency_ns)
+{
+ int entries, wm_size;
+
+ /*
+ * Note: we need to make sure we don't overflow for various clock &
+ * latency values.
+ * Clocks go from a few thousand to several hundred thousand.
+ * Latency is usually a few thousand.
+ */
+ entries = intel_wm_method1(pixel_rate, cpp,
+ latency_ns / 100);
+ entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
+ wm->guard_size;
+ DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);
+
+ wm_size = fifo_size - entries;
+ DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
+
+ /* Don't promote wm_size to unsigned... */
+ if (wm_size > wm->max_wm)
+ wm_size = wm->max_wm;
+ if (wm_size <= 0)
+ wm_size = wm->default_wm;
+
+ /*
+ * Bspec seems to indicate that the value shouldn't be lower than
+ * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
+ * Let's go for 8, which is the burst size, since certain platforms
+ * already use a hardcoded 8 (which is what the spec says should be
+ * done).
+ */
+ if (wm_size <= 8)
+ wm_size = 8;
+
+ return wm_size;
+}
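A hedged worked example of the level calculation above (the FIFO size is hypothetical, the rest follows the formulas): at 148500 kHz and 4 bytes per pixel with the 5000 ns pessimal latency, method 1 yields 2970 bytes; with a 64-byte cacheline that is DIV_ROUND_UP(2970, 64) = 47 entries, plus a guard of 2 makes 49, so a 96-entry FIFO would be left with a watermark level of 96 - 49 = 47, comfortably above the floor of 8.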
+
+static bool is_disabling(int old, int new, int threshold)
+{
+ return old >= threshold && new < threshold;
+}
+
+static bool is_enabling(int old, int new, int threshold)
+{
+ return old < threshold && new >= threshold;
+}
+
+static bool intel_crtc_active(struct intel_crtc *crtc)
+{
+ /* Be paranoid as we can arrive here with only partial
+ * state retrieved from the hardware during setup.
+ *
+ * We can ditch the adjusted_mode.crtc_clock check as soon
+ * as Haswell has gained clock readout/fastboot support.
+ *
+ * We can ditch the crtc->primary->state->fb check as soon as we can
+ * properly reconstruct framebuffers.
+ *
+ * FIXME: The intel_crtc->active here should be switched to
+ * crtc->state->active once we have proper CRTC states wired up
+ * for atomic.
+ */
+ return crtc && crtc->active && crtc->base.primary->state->fb &&
+ crtc->config->hw.adjusted_mode.crtc_clock;
+}
+
+static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc, *enabled = NULL;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ if (intel_crtc_active(crtc)) {
+ if (enabled)
+ return NULL;
+ enabled = crtc;
+ }
+ }
+
+ return enabled;
+}
+
+static void pnv_update_wm(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
+ const struct cxsr_latency *latency;
+ u32 reg;
+ unsigned int wm;
+
+ latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
+ dev_priv->is_ddr3,
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq);
+ if (!latency) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Unknown FSB/MEM found, disable CxSR\n");
+ intel_set_memory_cxsr(dev_priv, false);
+ return;
+ }
+
+ crtc = single_enabled_crtc(dev_priv);
+ if (crtc) {
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int pixel_rate = crtc->config->pixel_rate;
+ int cpp = fb->format->cpp[0];
+
+ /* Display SR */
+ wm = intel_calculate_wm(pixel_rate, &pnv_display_wm,
+ pnv_display_wm.fifo_size,
+ cpp, latency->display_sr);
+ reg = intel_uncore_read(&dev_priv->uncore, DSPFW1);
+ reg &= ~DSPFW_SR_MASK;
+ reg |= FW_WM(wm, SR);
+ intel_uncore_write(&dev_priv->uncore, DSPFW1, reg);
+ drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
+
+ /* cursor SR */
+ wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm,
+ pnv_display_wm.fifo_size,
+ 4, latency->cursor_sr);
+ intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_CURSOR_SR_MASK,
+ FW_WM(wm, CURSOR_SR));
+
+ /* Display HPLL off SR */
+ wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm,
+ pnv_display_hplloff_wm.fifo_size,
+ cpp, latency->display_hpll_disable);
+ intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
+
+ /* cursor HPLL off SR */
+ wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm,
+ pnv_display_hplloff_wm.fifo_size,
+ 4, latency->cursor_hpll_disable);
+ reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
+ reg &= ~DSPFW_HPLL_CURSOR_MASK;
+ reg |= FW_WM(wm, HPLL_CURSOR);
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
+ drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
+
+ intel_set_memory_cxsr(dev_priv, true);
+ } else {
+ intel_set_memory_cxsr(dev_priv, false);
+ }
+}
+
+/*
+ * Documentation says:
+ * "If the line size is small, the TLB fetches can get in the way of the
+ * data fetches, causing some lag in the pixel data return which is not
+ * accounted for in the above formulas. The following adjustment only
+ * needs to be applied if eight whole lines fit in the buffer at once.
+ * The WM is adjusted upwards by the difference between the FIFO size
+ * and the size of 8 whole lines. This adjustment is always performed
+ * in the actual pixel depth regardless of whether FBC is enabled or not."
+ */
+static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
+{
+ int tlb_miss = fifo_size * 64 - width * cpp * 8;
+
+ return max(0, tlb_miss);
+}
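Put numerically (values invented for illustration): a 127-cacheline FIFO holds 127 * 64 = 8128 bytes, so a 64-pixel-wide plane at 4 bytes per pixel (64 * 4 * 8 = 2048 bytes for eight lines) gets its watermark bumped by 8128 - 2048 = 6080 bytes, while a 1920-pixel-wide plane needs 61440 bytes for eight lines and gets no adjustment because the difference is negative and clamped to zero.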
+
+static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
+ const struct g4x_wm_values *wm)
+{
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe)
+ trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
+
+ intel_uncore_write(&dev_priv->uncore, DSPFW1,
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
+ intel_uncore_write(&dev_priv->uncore, DSPFW2,
+ (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
+ FW_WM(wm->sr.fbc, FBC_SR) |
+ FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
+ intel_uncore_write(&dev_priv->uncore, DSPFW3,
+ (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
+ FW_WM(wm->sr.cursor, CURSOR_SR) |
+ FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
+ FW_WM(wm->hpll.plane, HPLL_SR));
+
+ intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
+}
+
+#define FW_WM_VLV(value, plane) \
+ (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
+
+static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
+ const struct vlv_wm_values *wm)
+{
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe) {
+ trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
+
+ intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
+ (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
+ (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
+ }
+
+ /*
+ * Zero the (unused) WM1 watermarks, and also clear all the
+ * high order bits so that there are no out of bounds values
+ * present in the registers during the reprogramming.
+ */
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
+
+ intel_uncore_write(&dev_priv->uncore, DSPFW1,
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
+ intel_uncore_write(&dev_priv->uncore, DSPFW2,
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
+ intel_uncore_write(&dev_priv->uncore, DSPFW3,
+ FW_WM(wm->sr.cursor, CURSOR_SR));
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
+ intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
+ intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
+ } else {
+ intel_uncore_write(&dev_priv->uncore, DSPFW7,
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
+ }
+
+ intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
+}
+
+#undef FW_WM_VLV
+
+static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
+{
+ /* all latencies in usec */
+ dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
+ dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
+ dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
+
+ dev_priv->display.wm.num_levels = G4X_WM_LEVEL_HPLL + 1;
+}
+
+static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
+{
+ /*
+ * DSPCNTR[13] supposedly controls whether the
+ * primary plane can use the FIFO space otherwise
+ * reserved for the sprite plane. It's not 100% clear
+ * what the actual FIFO size is, but it looks like we
+ * can happily set both primary and sprite watermarks
+ * up to 127 cachelines. So that would seem to mean
+ * that either DSPCNTR[13] doesn't do anything, or that
+ * the total FIFO is >= 256 cachelines in size. Either
+ * way, we don't seem to have to worry about this
+ * repartitioning as the maximum watermark value the
+ * register can hold for each plane is lower than the
+ * minimum FIFO size.
+ */
+ switch (plane_id) {
+ case PLANE_CURSOR:
+ return 63;
+ case PLANE_PRIMARY:
+ return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
+ case PLANE_SPRITE0:
+ return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
+ default:
+ MISSING_CASE(plane_id);
+ return 0;
+ }
+}
+
+static int g4x_fbc_fifo_size(int level)
+{
+ switch (level) {
+ case G4X_WM_LEVEL_SR:
+ return 7;
+ case G4X_WM_LEVEL_HPLL:
+ return 15;
+ default:
+ MISSING_CASE(level);
+ return 0;
+ }
+}
+
+static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int level)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_display_mode *pipe_mode =
+ &crtc_state->hw.pipe_mode;
+ unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10;
+ unsigned int pixel_rate, htotal, cpp, width, wm;
+
+ if (latency == 0)
+ return USHRT_MAX;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ cpp = plane_state->hw.fb->format->cpp[0];
+
+ /*
+ * WaUse32BppForSRWM:ctg,elk
+ *
+ * The spec fails to list this restriction for the
+ * HPLL watermark, which seems a little strange.
+ * Let's use 32bpp for the HPLL watermark as well.
+ */
+ if (plane->id == PLANE_PRIMARY &&
+ level != G4X_WM_LEVEL_NORMAL)
+ cpp = max(cpp, 4u);
+
+ pixel_rate = crtc_state->pixel_rate;
+ htotal = pipe_mode->crtc_htotal;
+ width = drm_rect_width(&plane_state->uapi.src) >> 16;
+
+ if (plane->id == PLANE_CURSOR) {
+ wm = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);
+ } else if (plane->id == PLANE_PRIMARY &&
+ level == G4X_WM_LEVEL_NORMAL) {
+ wm = intel_wm_method1(pixel_rate, cpp, latency);
+ } else {
+ unsigned int small, large;
+
+ small = intel_wm_method1(pixel_rate, cpp, latency);
+ large = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);
+
+ wm = min(small, large);
+ }
+
+ wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
+ width, cpp);
+
+ wm = DIV_ROUND_UP(wm, 64) + 2;
+
+ return min_t(unsigned int, wm, USHRT_MAX);
+}
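To make the final conversion above concrete (numbers are illustrative only): if the smaller of the method 1 / method 2 results is 2970 bytes and the TLB-miss adjustment adds nothing, the value returned is DIV_ROUND_UP(2970, 64) + 2 = 49 cachelines, which the caller then checks against the per-plane FIFO limits from g4x_plane_fifo_size().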
+
+static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
+ int level, enum plane_id plane_id, u16 value)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ bool dirty = false;
+
+ for (; level < dev_priv->display.wm.num_levels; level++) {
+ struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
+
+ dirty |= raw->plane[plane_id] != value;
+ raw->plane[plane_id] = value;
+ }
+
+ return dirty;
+}
+
+static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
+ int level, u16 value)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ bool dirty = false;
+
+ /* NORMAL level doesn't have an FBC watermark */
+ level = max(level, G4X_WM_LEVEL_SR);
+
+ for (; level < dev_priv->display.wm.num_levels; level++) {
+ struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
+
+ dirty |= raw->fbc != value;
+ raw->fbc = value;
+ }
+
+ return dirty;
+}
+
+static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ u32 pri_val);
+
+static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ enum plane_id plane_id = plane->id;
+ bool dirty = false;
+ int level;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state)) {
+ dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
+ if (plane_id == PLANE_PRIMARY)
+ dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
+ goto out;
+ }
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
+ int wm, max_wm;
+
+ wm = g4x_compute_wm(crtc_state, plane_state, level);
+ max_wm = g4x_plane_fifo_size(plane_id, level);
+
+ if (wm > max_wm)
+ break;
+
+ dirty |= raw->plane[plane_id] != wm;
+ raw->plane[plane_id] = wm;
+
+ if (plane_id != PLANE_PRIMARY ||
+ level == G4X_WM_LEVEL_NORMAL)
+ continue;
+
+ wm = ilk_compute_fbc_wm(crtc_state, plane_state,
+ raw->plane[plane_id]);
+ max_wm = g4x_fbc_fifo_size(level);
+
+ /*
+ * FBC wm is not mandatory as we
+ * can always just disable its use.
+ */
+ if (wm > max_wm)
+ wm = USHRT_MAX;
+
+ dirty |= raw->fbc != wm;
+ raw->fbc = wm;
+ }
+
+ /* mark watermarks as invalid */
+ dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
+
+ if (plane_id == PLANE_PRIMARY)
+ dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
+
+ out:
+ if (dirty) {
+ drm_dbg_kms(&dev_priv->drm,
+ "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
+ plane->base.name,
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
+
+ if (plane_id == PLANE_PRIMARY)
+ drm_dbg_kms(&dev_priv->drm,
+ "FBC watermarks: SR=%d, HPLL=%d\n",
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
+ }
+
+ return dirty;
+}
+
+static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
+ enum plane_id plane_id, int level)
+{
+ const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
+
+ return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
+}
+
+static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
+ int level)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (level >= dev_priv->display.wm.num_levels)
+ return false;
+
+ return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
+ g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
+ g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
+}
+
+/* mark all levels starting from 'level' as invalid */
+static void g4x_invalidate_wms(struct intel_crtc *crtc,
+ struct g4x_wm_state *wm_state, int level)
+{
+ if (level <= G4X_WM_LEVEL_NORMAL) {
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ wm_state->wm.plane[plane_id] = USHRT_MAX;
+ }
+
+ if (level <= G4X_WM_LEVEL_SR) {
+ wm_state->cxsr = false;
+ wm_state->sr.cursor = USHRT_MAX;
+ wm_state->sr.plane = USHRT_MAX;
+ wm_state->sr.fbc = USHRT_MAX;
+ }
+
+ if (level <= G4X_WM_LEVEL_HPLL) {
+ wm_state->hpll_en = false;
+ wm_state->hpll.cursor = USHRT_MAX;
+ wm_state->hpll.plane = USHRT_MAX;
+ wm_state->hpll.fbc = USHRT_MAX;
+ }
+}
+
+static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state,
+ int level)
+{
+ if (level < G4X_WM_LEVEL_SR)
+ return false;
+
+ if (level >= G4X_WM_LEVEL_SR &&
+ wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
+ return false;
+
+ if (level >= G4X_WM_LEVEL_HPLL &&
+ wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
+ return false;
+
+ return true;
+}
+
+static int _g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ const struct g4x_pipe_wm *raw;
+ enum plane_id plane_id;
+ int level;
+
+ level = G4X_WM_LEVEL_NORMAL;
+ if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
+ goto out;
+
+ raw = &crtc_state->wm.g4x.raw[level];
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ wm_state->wm.plane[plane_id] = raw->plane[plane_id];
+
+ level = G4X_WM_LEVEL_SR;
+ if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
+ goto out;
+
+ raw = &crtc_state->wm.g4x.raw[level];
+ wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
+ wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
+ wm_state->sr.fbc = raw->fbc;
+
+ wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY);
+
+ level = G4X_WM_LEVEL_HPLL;
+ if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
+ goto out;
+
+ raw = &crtc_state->wm.g4x.raw[level];
+ wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
+ wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
+ wm_state->hpll.fbc = raw->fbc;
+
+ wm_state->hpll_en = wm_state->cxsr;
+
+ level++;
+
+ out:
+ if (level == G4X_WM_LEVEL_NORMAL)
+ return -EINVAL;
+
+ /* invalidate the higher levels */
+ g4x_invalidate_wms(crtc, wm_state, level);
+
+ /*
+ * Determine if the FBC watermark(s) can be used. If
+ * this isn't the case we prefer to disable the FBC
+ * watermark(s) rather than disable the SR/HPLL
+ * level(s) entirely. 'level-1' is the highest valid
+ * level here.
+ */
+ wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1);
+
+ return 0;
+}
+
+static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *old_plane_state;
+ const struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ unsigned int dirty = 0;
+ int i;
+
+ for_each_oldnew_intel_plane_in_state(state, plane,
+ old_plane_state,
+ new_plane_state, i) {
+ if (new_plane_state->hw.crtc != &crtc->base &&
+ old_plane_state->hw.crtc != &crtc->base)
+ continue;
+
+ if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
+ dirty |= BIT(plane->id);
+ }
+
+ if (!dirty)
+ return 0;
+
+ return _g4x_compute_pipe_wm(crtc_state);
+}
+
+static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
+ const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
+ const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
+ enum plane_id plane_id;
+
+ if (!new_crtc_state->hw.active ||
+ intel_crtc_needs_modeset(new_crtc_state)) {
+ *intermediate = *optimal;
+
+ intermediate->cxsr = false;
+ intermediate->hpll_en = false;
+ goto out;
+ }
+
+ intermediate->cxsr = optimal->cxsr && active->cxsr &&
+ !new_crtc_state->disable_cxsr;
+ intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
+ !new_crtc_state->disable_cxsr;
+ intermediate->fbc_en = optimal->fbc_en && active->fbc_en;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ intermediate->wm.plane[plane_id] =
+ max(optimal->wm.plane[plane_id],
+ active->wm.plane[plane_id]);
+
+ drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
+ g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
+ }
+
+ intermediate->sr.plane = max(optimal->sr.plane,
+ active->sr.plane);
+ intermediate->sr.cursor = max(optimal->sr.cursor,
+ active->sr.cursor);
+ intermediate->sr.fbc = max(optimal->sr.fbc,
+ active->sr.fbc);
+
+ intermediate->hpll.plane = max(optimal->hpll.plane,
+ active->hpll.plane);
+ intermediate->hpll.cursor = max(optimal->hpll.cursor,
+ active->hpll.cursor);
+ intermediate->hpll.fbc = max(optimal->hpll.fbc,
+ active->hpll.fbc);
+
+ drm_WARN_ON(&dev_priv->drm,
+ (intermediate->sr.plane >
+ g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
+ intermediate->sr.cursor >
+ g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
+ intermediate->cxsr);
+ drm_WARN_ON(&dev_priv->drm,
+ (intermediate->sr.plane >
+ g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
+ intermediate->sr.cursor >
+ g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
+ intermediate->hpll_en);
+
+ drm_WARN_ON(&dev_priv->drm,
+ intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
+ intermediate->fbc_en && intermediate->cxsr);
+ drm_WARN_ON(&dev_priv->drm,
+ intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
+ intermediate->fbc_en && intermediate->hpll_en);
+
+out:
+ /*
+ * If our intermediate WMs are identical to the final WMs, then we can
+ * omit the post-vblank programming; only update if it's different.
+ */
+ if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
+ new_crtc_state->wm.need_postvbl_update = true;
+
+ return 0;
+}
+
+static void g4x_merge_wm(struct drm_i915_private *dev_priv,
+ struct g4x_wm_values *wm)
+{
+ struct intel_crtc *crtc;
+ int num_active_pipes = 0;
+
+ wm->cxsr = true;
+ wm->hpll_en = true;
+ wm->fbc_en = true;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
+
+ if (!crtc->active)
+ continue;
+
+ if (!wm_state->cxsr)
+ wm->cxsr = false;
+ if (!wm_state->hpll_en)
+ wm->hpll_en = false;
+ if (!wm_state->fbc_en)
+ wm->fbc_en = false;
+
+ num_active_pipes++;
+ }
+
+ if (num_active_pipes != 1) {
+ wm->cxsr = false;
+ wm->hpll_en = false;
+ wm->fbc_en = false;
+ }
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
+ enum pipe pipe = crtc->pipe;
+
+ wm->pipe[pipe] = wm_state->wm;
+ if (crtc->active && wm->cxsr)
+ wm->sr = wm_state->sr;
+ if (crtc->active && wm->hpll_en)
+ wm->hpll = wm_state->hpll;
+ }
+}
+
+static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
+{
+ struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x;
+ struct g4x_wm_values new_wm = {};
+
+ g4x_merge_wm(dev_priv, &new_wm);
+
+ if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
+ return;
+
+ if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
+ _intel_set_memory_cxsr(dev_priv, false);
+
+ g4x_write_wm_values(dev_priv, &new_wm);
+
+ if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
+ _intel_set_memory_cxsr(dev_priv, true);
+
+ *old_wm = new_wm;
+}
+
+static void g4x_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+ crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
+ g4x_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+static void g4x_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!crtc_state->wm.need_postvbl_update)
+ return;
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+ crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
+ g4x_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+/* latency must be in 0.1us units. */
+static unsigned int vlv_wm_method2(unsigned int pixel_rate,
+ unsigned int htotal,
+ unsigned int width,
+ unsigned int cpp,
+ unsigned int latency)
+{
+ unsigned int ret;
+
+ ret = intel_wm_method2(pixel_rate, htotal,
+ width, cpp, latency);
+ ret = DIV_ROUND_UP(ret, 64);
+
+ return ret;
+}
+
+static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
+{
+ /* all latencies in usec */
+ dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
+
+ dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM2 + 1;
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
+ dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
+
+ dev_priv->display.wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1;
+ }
+}
+
+static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int level)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_display_mode *pipe_mode =
+ &crtc_state->hw.pipe_mode;
+ unsigned int pixel_rate, htotal, cpp, width, wm;
+
+ if (dev_priv->display.wm.pri_latency[level] == 0)
+ return USHRT_MAX;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ cpp = plane_state->hw.fb->format->cpp[0];
+ pixel_rate = crtc_state->pixel_rate;
+ htotal = pipe_mode->crtc_htotal;
+ width = drm_rect_width(&plane_state->uapi.src) >> 16;
+
+ if (plane->id == PLANE_CURSOR) {
+ /*
+ * FIXME the formula gives values that are
+ * too big for the cursor FIFO, and hence we
+ * would never be able to use cursors. For
+ * now just hardcode the watermark.
+ */
+ wm = 63;
+ } else {
+ wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
+ dev_priv->display.wm.pri_latency[level] * 10);
+ }
+
+ return min_t(unsigned int, wm, USHRT_MAX);
+}
+
+static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
+{
+ return (active_planes & (BIT(PLANE_SPRITE0) |
+ BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
+}
+
+static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct g4x_pipe_wm *raw =
+ &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
+ struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ int num_active_planes = hweight8(active_planes);
+ const int fifo_size = 511;
+ int fifo_extra, fifo_left = fifo_size;
+ int sprite0_fifo_extra = 0;
+ unsigned int total_rate;
+ enum plane_id plane_id;
+
+ /*
+ * When enabling sprite0 after sprite1 has already been enabled
+ * we tend to get an underrun unless sprite0 already has some
+ * FIFO space allocated. Hence we always allocate at least one
+ * cacheline for sprite0 whenever sprite1 is enabled.
+ *
+ * All other plane enable sequences appear immune to this problem.
+ */
+ if (vlv_need_sprite0_fifo_workaround(active_planes))
+ sprite0_fifo_extra = 1;
+
+ total_rate = raw->plane[PLANE_PRIMARY] +
+ raw->plane[PLANE_SPRITE0] +
+ raw->plane[PLANE_SPRITE1] +
+ sprite0_fifo_extra;
+
+ if (total_rate > fifo_size)
+ return -EINVAL;
+
+ if (total_rate == 0)
+ total_rate = 1;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ unsigned int rate;
+
+ if ((active_planes & BIT(plane_id)) == 0) {
+ fifo_state->plane[plane_id] = 0;
+ continue;
+ }
+
+ rate = raw->plane[plane_id];
+ fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
+ fifo_left -= fifo_state->plane[plane_id];
+ }
+
+ fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
+ fifo_left -= sprite0_fifo_extra;
+
+ fifo_state->plane[PLANE_CURSOR] = 63;
+
+ fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);
+
+ /* spread the remainder evenly */
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ int plane_extra;
+
+ if (fifo_left == 0)
+ break;
+
+ if ((active_planes & BIT(plane_id)) == 0)
+ continue;
+
+ plane_extra = min(fifo_extra, fifo_left);
+ fifo_state->plane[plane_id] += plane_extra;
+ fifo_left -= plane_extra;
+ }
+
+ drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);
+
+ /* give it all to the first plane if none are active */
+ if (active_planes == 0) {
+ drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
+ fifo_state->plane[PLANE_PRIMARY] = fifo_left;
+ }
+
+ return 0;
+}
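A hedged worked example of the proportional split above (raw watermark values invented for illustration): with the primary plane's PM2 raw watermark at 200 cachelines, sprite0 at 100 and sprite1 disabled, total_rate is 300, so the 511-entry FIFO splits as 511 * 200 / 300 = 340 for the primary and 511 * 100 / 300 = 170 for sprite0; the single leftover cacheline is handed out by the remainder loop, and the cursor keeps its fixed 63 entries.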
+
+/* mark all levels starting from 'level' as invalid */
+static void vlv_invalidate_wms(struct intel_crtc *crtc,
+ struct vlv_wm_state *wm_state, int level)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ for (; level < dev_priv->display.wm.num_levels; level++) {
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ wm_state->wm[level].plane[plane_id] = USHRT_MAX;
+
+ wm_state->sr[level].cursor = USHRT_MAX;
+ wm_state->sr[level].plane = USHRT_MAX;
+ }
+}
+
+static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
+{
+ if (wm > fifo_size)
+ return USHRT_MAX;
+ else
+ return fifo_size - wm;
+}
+
+/*
+ * Starting from 'level' set all higher
+ * levels to 'value' in the "raw" watermarks.
+ */
+static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
+ int level, enum plane_id plane_id, u16 value)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ bool dirty = false;
+
+ for (; level < dev_priv->display.wm.num_levels; level++) {
+ struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
+
+ dirty |= raw->plane[plane_id] != value;
+ raw->plane[plane_id] = value;
+ }
+
+ return dirty;
+}
+
+static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ enum plane_id plane_id = plane->id;
+ int level;
+ bool dirty = false;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state)) {
+ dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
+ goto out;
+ }
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
+ int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
+ int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
+
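+		/* a level whose raw value can't ever fit the FIFO is unusable */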
+ if (wm > max_wm)
+ break;
+
+ dirty |= raw->plane[plane_id] != wm;
+ raw->plane[plane_id] = wm;
+ }
+
+ /* mark all higher levels as invalid */
+ dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
+
+out:
+ if (dirty)
+ drm_dbg_kms(&dev_priv->drm,
+ "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
+ plane->base.name,
+ crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
+ crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
+ crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
+
+ return dirty;
+}
+
+static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
+ enum plane_id plane_id, int level)
+{
+ const struct g4x_pipe_wm *raw =
+ &crtc_state->wm.vlv.raw[level];
+ const struct vlv_fifo_state *fifo_state =
+ &crtc_state->wm.vlv.fifo_state;
+
+ return raw->plane[plane_id] <= fifo_state->plane[plane_id];
+}
+
+static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
+{
+ return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
+ vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
+ vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
+ vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
+}
+
+static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
+ const struct vlv_fifo_state *fifo_state =
+ &crtc_state->wm.vlv.fifo_state;
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ int num_active_planes = hweight8(active_planes);
+ enum plane_id plane_id;
+ int level;
+
+ /* initially allow all levels */
+ wm_state->num_levels = dev_priv->display.wm.num_levels;
+ /*
+ * Note that enabling cxsr with no primary/sprite planes
+ * enabled can wedge the pipe. Hence we only allow cxsr
+ * with exactly one enabled primary/sprite plane.
+ */
+ wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;
+
+ for (level = 0; level < wm_state->num_levels; level++) {
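+		/*
+		 * cxsr is only used with a single active pipe (see
+		 * vlv_merge_wm()), so the self-refresh watermark is
+		 * checked against the FIFO space of all pipes.
+		 */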
+ const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
+ const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
+
+ if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
+ break;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ wm_state->wm[level].plane[plane_id] =
+ vlv_invert_wm_value(raw->plane[plane_id],
+ fifo_state->plane[plane_id]);
+ }
+
+ wm_state->sr[level].plane =
+ vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
+ raw->plane[PLANE_SPRITE0],
+ raw->plane[PLANE_SPRITE1]),
+ sr_fifo_size);
+
+ wm_state->sr[level].cursor =
+ vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
+ 63);
+ }
+
+ if (level == 0)
+ return -EINVAL;
+
+ /* limit to only levels we can actually handle */
+ wm_state->num_levels = level;
+
+ /* invalidate the higher levels */
+ vlv_invalidate_wms(crtc, wm_state, level);
+
+ return 0;
+}
+
+static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *old_plane_state;
+ const struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ unsigned int dirty = 0;
+ int i;
+
+ for_each_oldnew_intel_plane_in_state(state, plane,
+ old_plane_state,
+ new_plane_state, i) {
+ if (new_plane_state->hw.crtc != &crtc->base &&
+ old_plane_state->hw.crtc != &crtc->base)
+ continue;
+
+ if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
+ dirty |= BIT(plane->id);
+ }
+
+ /*
+ * DSPARB registers may have been reset due to the
+ * power well being turned off. Make sure we restore
+ * them to a consistent state even if no primary/sprite
+ * planes are initially active. We also force a FIFO
+ * recomputation so that we are sure to sanitize the
+ * FIFO setting we took over from the BIOS even if there
+ * are no active planes on the crtc.
+ */
+ if (intel_crtc_needs_modeset(crtc_state))
+ dirty = ~0;
+
+ if (!dirty)
+ return 0;
+
+ /* cursor changes don't warrant a FIFO recompute */
+ if (dirty & ~BIT(PLANE_CURSOR)) {
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct vlv_fifo_state *old_fifo_state =
+ &old_crtc_state->wm.vlv.fifo_state;
+ const struct vlv_fifo_state *new_fifo_state =
+ &crtc_state->wm.vlv.fifo_state;
+ int ret;
+
+ ret = vlv_compute_fifo(crtc_state);
+ if (ret)
+ return ret;
+
+ if (intel_crtc_needs_modeset(crtc_state) ||
+ memcmp(old_fifo_state, new_fifo_state,
+ sizeof(*new_fifo_state)) != 0)
+ crtc_state->fifo_changed = true;
+ }
+
+ return _vlv_compute_pipe_wm(crtc_state);
+}
+
+#define VLV_FIFO(plane, value) \
+ (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
+
+static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct vlv_fifo_state *fifo_state =
+ &crtc_state->wm.vlv.fifo_state;
+ int sprite0_start, sprite1_start, fifo_size;
+ u32 dsparb, dsparb2, dsparb3;
+
+ if (!crtc_state->fifo_changed)
+ return;
+
+ sprite0_start = fifo_state->plane[PLANE_PRIMARY];
+ sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
+ fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
+
+ drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
+ drm_WARN_ON(&dev_priv->drm, fifo_size != 511);
+
+ trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
+
+ /*
+ * uncore.lock serves a double purpose here. It allows us to
+	 * use the less expensive intel_uncore_*_fw() accessors, and
+ * it protects the DSPARB registers from getting clobbered by
+ * parallel updates from multiple pipes.
+ *
+ * intel_pipe_update_start() has already disabled interrupts
+ * for us, so a plain spin_lock() is sufficient here.
+ */
+ spin_lock(&uncore->lock);
+
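+	/*
+	 * DSPARB (pipes A/B) and DSPARB3 (pipe C) hold the low 8 bits of
+	 * each sprite's FIFO start point, while DSPARB2 holds the 9th
+	 * bit, hence the ">> 8" below.
+	 */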
+ switch (crtc->pipe) {
+ case PIPE_A:
+ dsparb = intel_uncore_read_fw(uncore, DSPARB);
+ dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+
+ dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
+ VLV_FIFO(SPRITEB, 0xff));
+ dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
+ VLV_FIFO(SPRITEB, sprite1_start));
+
+ dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
+ VLV_FIFO(SPRITEB_HI, 0x1));
+ dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
+ VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
+
+ intel_uncore_write_fw(uncore, DSPARB, dsparb);
+ intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ break;
+ case PIPE_B:
+ dsparb = intel_uncore_read_fw(uncore, DSPARB);
+ dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+
+ dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
+ VLV_FIFO(SPRITED, 0xff));
+ dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
+ VLV_FIFO(SPRITED, sprite1_start));
+
+ dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
+ VLV_FIFO(SPRITED_HI, 0xff));
+ dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
+ VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
+
+ intel_uncore_write_fw(uncore, DSPARB, dsparb);
+ intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ break;
+ case PIPE_C:
+ dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
+ dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+
+ dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
+ VLV_FIFO(SPRITEF, 0xff));
+ dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
+ VLV_FIFO(SPRITEF, sprite1_start));
+
+ dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
+ VLV_FIFO(SPRITEF_HI, 0xff));
+ dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
+ VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
+
+ intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
+ intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ break;
+ default:
+ break;
+ }
+
+ intel_uncore_posting_read_fw(uncore, DSPARB);
+
+ spin_unlock(&uncore->lock);
+}
+
+#undef VLV_FIFO
+
+static int vlv_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
+ const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
+ const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
+ int level;
+
+ if (!new_crtc_state->hw.active ||
+ intel_crtc_needs_modeset(new_crtc_state)) {
+ *intermediate = *optimal;
+
+ intermediate->cxsr = false;
+ goto out;
+ }
+
+ intermediate->num_levels = min(optimal->num_levels, active->num_levels);
+ intermediate->cxsr = optimal->cxsr && active->cxsr &&
+ !new_crtc_state->disable_cxsr;
+
+ for (level = 0; level < intermediate->num_levels; level++) {
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ intermediate->wm[level].plane[plane_id] =
+ min(optimal->wm[level].plane[plane_id],
+ active->wm[level].plane[plane_id]);
+ }
+
+ intermediate->sr[level].plane = min(optimal->sr[level].plane,
+ active->sr[level].plane);
+ intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
+ active->sr[level].cursor);
+ }
+
+ vlv_invalidate_wms(crtc, intermediate, level);
+
+out:
+ /*
+	 * If our intermediate WMs are identical to the final WMs, then we can
+ * omit the post-vblank programming; only update if it's different.
+ */
+ if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
+ new_crtc_state->wm.need_postvbl_update = true;
+
+ return 0;
+}
+
+static void vlv_merge_wm(struct drm_i915_private *dev_priv,
+ struct vlv_wm_values *wm)
+{
+ struct intel_crtc *crtc;
+ int num_active_pipes = 0;
+
+ wm->level = dev_priv->display.wm.num_levels - 1;
+ wm->cxsr = true;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
+
+ if (!crtc->active)
+ continue;
+
+ if (!wm_state->cxsr)
+ wm->cxsr = false;
+
+ num_active_pipes++;
+ wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
+ }
+
+ if (num_active_pipes != 1)
+ wm->cxsr = false;
+
+ if (num_active_pipes > 1)
+ wm->level = VLV_WM_LEVEL_PM2;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
+ enum pipe pipe = crtc->pipe;
+
+ wm->pipe[pipe] = wm_state->wm[wm->level];
+ if (crtc->active && wm->cxsr)
+ wm->sr = wm_state->sr[wm->level];
+
+ wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
+ }
+}
+
+static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
+{
+ struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv;
+ struct vlv_wm_values new_wm = {};
+
+ vlv_merge_wm(dev_priv, &new_wm);
+
+ if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
+ return;
+
+ if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
+ chv_set_memory_dvfs(dev_priv, false);
+
+ if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
+ chv_set_memory_pm5(dev_priv, false);
+
+ if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
+ _intel_set_memory_cxsr(dev_priv, false);
+
+ vlv_write_wm_values(dev_priv, &new_wm);
+
+ if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
+ _intel_set_memory_cxsr(dev_priv, true);
+
+ if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
+ chv_set_memory_pm5(dev_priv, true);
+
+ if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
+ chv_set_memory_dvfs(dev_priv, true);
+
+ *old_wm = new_wm;
+}
+
+static void vlv_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+ crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
+ vlv_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+static void vlv_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!crtc_state->wm.need_postvbl_update)
+ return;
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+ crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
+ vlv_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+static void i965_update_wm(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
+ int srwm = 1;
+ int cursor_sr = 16;
+ bool cxsr_enabled;
+
+ /* Calc sr entries for one plane configs */
+ crtc = single_enabled_crtc(dev_priv);
+ if (crtc) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 12000;
+ const struct drm_display_mode *pipe_mode =
+ &crtc->config->hw.pipe_mode;
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int pixel_rate = crtc->config->pixel_rate;
+ int htotal = pipe_mode->crtc_htotal;
+ int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
+ int cpp = fb->format->cpp[0];
+ int entries;
+
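+		/* sr_latency_ns / 100 converts ns to the 0.1us units intel_wm_method2() takes */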
+ entries = intel_wm_method2(pixel_rate, htotal,
+ width, cpp, sr_latency_ns / 100);
+ entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
+ srwm = I965_FIFO_SIZE - entries;
+ if (srwm < 0)
+ srwm = 1;
+ srwm &= 0x1ff;
+ drm_dbg_kms(&dev_priv->drm,
+ "self-refresh entries: %d, wm: %d\n",
+ entries, srwm);
+
+ entries = intel_wm_method2(pixel_rate, htotal,
+ crtc->base.cursor->state->crtc_w, 4,
+ sr_latency_ns / 100);
+ entries = DIV_ROUND_UP(entries,
+ i965_cursor_wm_info.cacheline_size) +
+ i965_cursor_wm_info.guard_size;
+
+ cursor_sr = i965_cursor_wm_info.fifo_size - entries;
+ if (cursor_sr > i965_cursor_wm_info.max_wm)
+ cursor_sr = i965_cursor_wm_info.max_wm;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "self-refresh watermark: display plane %d "
+ "cursor %d\n", srwm, cursor_sr);
+
+ cxsr_enabled = true;
+ } else {
+ cxsr_enabled = false;
+ /* Turn off self refresh if both pipes are enabled */
+ intel_set_memory_cxsr(dev_priv, false);
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
+ srwm);
+
+ /* 965 has limitations... */
+ intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) |
+ FW_WM(8, CURSORB) |
+ FW_WM(8, PLANEB) |
+ FW_WM(8, PLANEA));
+ intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) |
+ FW_WM(8, PLANEC_OLD));
+ /* update cursor SR watermark */
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
+
+ if (cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, true);
+}
+
+#undef FW_WM
+
+static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
+ enum i9xx_plane_id i9xx_plane)
+{
+ struct intel_plane *plane;
+
+ for_each_intel_plane(&i915->drm, plane) {
+ if (plane->id == PLANE_PRIMARY &&
+ plane->i9xx_plane == i9xx_plane)
+ return intel_crtc_for_pipe(i915, plane->pipe);
+ }
+
+ return NULL;
+}
+
+static void i9xx_update_wm(struct drm_i915_private *dev_priv)
+{
+ const struct intel_watermark_params *wm_info;
+ u32 fwater_lo;
+ u32 fwater_hi;
+ int cwm, srwm = 1;
+ int fifo_size;
+ int planea_wm, planeb_wm;
+ struct intel_crtc *crtc;
+
+ if (IS_I945GM(dev_priv))
+ wm_info = &i945_wm_info;
+ else if (DISPLAY_VER(dev_priv) != 2)
+ wm_info = &i915_wm_info;
+ else
+ wm_info = &i830_a_wm_info;
+
+ if (DISPLAY_VER(dev_priv) == 2)
+ fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
+ else
+ fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
+ crtc = intel_crtc_for_plane(dev_priv, PLANE_A);
+ if (intel_crtc_active(crtc)) {
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int cpp;
+
+ if (DISPLAY_VER(dev_priv) == 2)
+ cpp = 4;
+ else
+ cpp = fb->format->cpp[0];
+
+ planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
+ wm_info, fifo_size, cpp,
+ pessimal_latency_ns);
+ } else {
+ planea_wm = fifo_size - wm_info->guard_size;
+ if (planea_wm > (long)wm_info->max_wm)
+ planea_wm = wm_info->max_wm;
+ }
+
+ if (DISPLAY_VER(dev_priv) == 2)
+ wm_info = &i830_bc_wm_info;
+
+ if (DISPLAY_VER(dev_priv) == 2)
+ fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
+ else
+ fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
+ crtc = intel_crtc_for_plane(dev_priv, PLANE_B);
+ if (intel_crtc_active(crtc)) {
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int cpp;
+
+ if (DISPLAY_VER(dev_priv) == 2)
+ cpp = 4;
+ else
+ cpp = fb->format->cpp[0];
+
+ planeb_wm = intel_calculate_wm(crtc->config->pixel_rate,
+ wm_info, fifo_size, cpp,
+ pessimal_latency_ns);
+ } else {
+ planeb_wm = fifo_size - wm_info->guard_size;
+ if (planeb_wm > (long)wm_info->max_wm)
+ planeb_wm = wm_info->max_wm;
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+
+ crtc = single_enabled_crtc(dev_priv);
+ if (IS_I915GM(dev_priv) && crtc) {
+ struct drm_i915_gem_object *obj;
+
+ obj = intel_fb_obj(crtc->base.primary->state->fb);
+
+ /* self-refresh seems busted with untiled */
+ if (!i915_gem_object_is_tiled(obj))
+ crtc = NULL;
+ }
+
+ /*
+ * Overlay gets an aggressive default since video jitter is bad.
+ */
+ cwm = 2;
+
+ /* Play safe and disable self-refresh before adjusting watermarks. */
+ intel_set_memory_cxsr(dev_priv, false);
+
+ /* Calc sr entries for one plane configs */
+ if (HAS_FW_BLC(dev_priv) && crtc) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 6000;
+ const struct drm_display_mode *pipe_mode =
+ &crtc->config->hw.pipe_mode;
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int pixel_rate = crtc->config->pixel_rate;
+ int htotal = pipe_mode->crtc_htotal;
+ int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
+ int cpp;
+ int entries;
+
+ if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
+ cpp = 4;
+ else
+ cpp = fb->format->cpp[0];
+
+ entries = intel_wm_method2(pixel_rate, htotal, width, cpp,
+ sr_latency_ns / 100);
+ entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
+ drm_dbg_kms(&dev_priv->drm,
+ "self-refresh entries: %d\n", entries);
+ srwm = wm_info->fifo_size - entries;
+ if (srwm < 0)
+ srwm = 1;
+
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
+ FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ else
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+ planea_wm, planeb_wm, cwm, srwm);
+
+ fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
+ fwater_hi = (cwm & 0x1f);
+
+ /* Set request length to 8 cachelines per fetch */
+ fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
+ fwater_hi = fwater_hi | (1 << 8);
+
+ intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);
+
+ if (crtc)
+ intel_set_memory_cxsr(dev_priv, true);
+}
+
+static void i845_update_wm(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
+ u32 fwater_lo;
+ int planea_wm;
+
+ crtc = single_enabled_crtc(dev_priv);
+ if (crtc == NULL)
+ return;
+
+ planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
+ &i845_wm_info,
+ i845_get_fifo_size(dev_priv, PLANE_A),
+ 4, pessimal_latency_ns);
+ fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
+ fwater_lo |= (3<<8) | planea_wm;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting FIFO watermarks - A: %d\n", planea_wm);
+
+ intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
+}
+
+/* latency must be in 0.1us units. */
+static unsigned int ilk_wm_method1(unsigned int pixel_rate,
+ unsigned int cpp,
+ unsigned int latency)
+{
+ unsigned int ret;
+
+ ret = intel_wm_method1(pixel_rate, cpp, latency);
+ ret = DIV_ROUND_UP(ret, 64) + 2;
+
+ return ret;
+}
+
+/* latency must be in 0.1us units. */
+static unsigned int ilk_wm_method2(unsigned int pixel_rate,
+ unsigned int htotal,
+ unsigned int width,
+ unsigned int cpp,
+ unsigned int latency)
+{
+ unsigned int ret;
+
+ ret = intel_wm_method2(pixel_rate, htotal,
+ width, cpp, latency);
+ ret = DIV_ROUND_UP(ret, 64) + 2;
+
+ return ret;
+}
+
+static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
+{
+ /*
+ * Neither of these should be possible since this function shouldn't be
+ * called if the CRTC is off or the plane is invisible. But let's be
+ * extra paranoid to avoid a potential divide-by-zero if we screw up
+ * elsewhere in the driver.
+ */
+ if (WARN_ON(!cpp))
+ return 0;
+ if (WARN_ON(!horiz_pixels))
+ return 0;
+
+ return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
+}
+
+struct ilk_wm_maximums {
+ u16 pri;
+ u16 spr;
+ u16 cur;
+ u16 fbc;
+};
+
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ u32 mem_value, bool is_lp)
+{
+ u32 method1, method2;
+ int cpp;
+
+ if (mem_value == 0)
+ return U32_MAX;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ cpp = plane_state->hw.fb->format->cpp[0];
+
+ method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
+
+ if (!is_lp)
+ return method1;
+
+ method2 = ilk_wm_method2(crtc_state->pixel_rate,
+ crtc_state->hw.pipe_mode.crtc_htotal,
+ drm_rect_width(&plane_state->uapi.src) >> 16,
+ cpp, mem_value);
+
+ return min(method1, method2);
+}
+
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ u32 mem_value)
+{
+ u32 method1, method2;
+ int cpp;
+
+ if (mem_value == 0)
+ return U32_MAX;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ cpp = plane_state->hw.fb->format->cpp[0];
+
+ method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
+ method2 = ilk_wm_method2(crtc_state->pixel_rate,
+ crtc_state->hw.pipe_mode.crtc_htotal,
+ drm_rect_width(&plane_state->uapi.src) >> 16,
+ cpp, mem_value);
+ return min(method1, method2);
+}
+
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ u32 mem_value)
+{
+ int cpp;
+
+ if (mem_value == 0)
+ return U32_MAX;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ cpp = plane_state->hw.fb->format->cpp[0];
+
+ return ilk_wm_method2(crtc_state->pixel_rate,
+ crtc_state->hw.pipe_mode.crtc_htotal,
+ drm_rect_width(&plane_state->uapi.src) >> 16,
+ cpp, mem_value);
+}
+
+/* Only for WM_LP. */
+static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ u32 pri_val)
+{
+ int cpp;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ cpp = plane_state->hw.fb->format->cpp[0];
+
+ return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.src) >> 16,
+ cpp);
+}
+
+static unsigned int
+ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
+{
+ if (DISPLAY_VER(dev_priv) >= 8)
+ return 3072;
+ else if (DISPLAY_VER(dev_priv) >= 7)
+ return 768;
+ else
+ return 512;
+}
+
+static unsigned int
+ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
+ int level, bool is_sprite)
+{
+ if (DISPLAY_VER(dev_priv) >= 8)
+ /* BDW primary/sprite plane watermarks */
+ return level == 0 ? 255 : 2047;
+ else if (DISPLAY_VER(dev_priv) >= 7)
+ /* IVB/HSW primary/sprite plane watermarks */
+ return level == 0 ? 127 : 1023;
+ else if (!is_sprite)
+ /* ILK/SNB primary plane watermarks */
+ return level == 0 ? 127 : 511;
+ else
+ /* ILK/SNB sprite plane watermarks */
+ return level == 0 ? 63 : 255;
+}
+
+static unsigned int
+ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
+{
+ if (DISPLAY_VER(dev_priv) >= 7)
+ return level == 0 ? 63 : 255;
+ else
+ return level == 0 ? 31 : 63;
+}
+
+static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
+{
+ if (DISPLAY_VER(dev_priv) >= 8)
+ return 31;
+ else
+ return 15;
+}
+
+/* Calculate the maximum primary/sprite plane watermark */
+static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
+ int level,
+ const struct intel_wm_config *config,
+ enum intel_ddb_partitioning ddb_partitioning,
+ bool is_sprite)
+{
+ unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
+
+ /* if sprites aren't enabled, sprites get nothing */
+ if (is_sprite && !config->sprites_enabled)
+ return 0;
+
+ /* HSW allows LP1+ watermarks even with multiple pipes */
+ if (level == 0 || config->num_pipes_active > 1) {
+ fifo_size /= INTEL_NUM_PIPES(dev_priv);
+
+ /*
+ * For some reason the non self refresh
+ * FIFO size is only half of the self
+ * refresh FIFO size on ILK/SNB.
+ */
+ if (DISPLAY_VER(dev_priv) <= 6)
+ fifo_size /= 2;
+ }
+
+ if (config->sprites_enabled) {
+ /* level 0 is always calculated with 1:1 split */
+ if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
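+			/* sprites get 5/6 of the FIFO, the primary the remaining 1/6 */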
+ if (is_sprite)
+ fifo_size *= 5;
+ fifo_size /= 6;
+ } else {
+ fifo_size /= 2;
+ }
+ }
+
+ /* clamp to max that the registers can hold */
+ return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
+}
+
+/* Calculate the maximum cursor plane watermark */
+static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
+ int level,
+ const struct intel_wm_config *config)
+{
+ /* HSW LP1+ watermarks w/ multiple pipes */
+ if (level > 0 && config->num_pipes_active > 1)
+ return 64;
+
+ /* otherwise just report max that registers can hold */
+ return ilk_cursor_wm_reg_max(dev_priv, level);
+}
+
+static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
+ int level,
+ const struct intel_wm_config *config,
+ enum intel_ddb_partitioning ddb_partitioning,
+ struct ilk_wm_maximums *max)
+{
+ max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
+ max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
+ max->cur = ilk_cursor_wm_max(dev_priv, level, config);
+ max->fbc = ilk_fbc_wm_reg_max(dev_priv);
+}
+
+static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
+ int level,
+ struct ilk_wm_maximums *max)
+{
+ max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
+ max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
+ max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
+ max->fbc = ilk_fbc_wm_reg_max(dev_priv);
+}
+
+static bool ilk_validate_wm_level(int level,
+ const struct ilk_wm_maximums *max,
+ struct intel_wm_level *result)
+{
+ bool ret;
+
+ /* already determined to be invalid? */
+ if (!result->enable)
+ return false;
+
+ result->enable = result->pri_val <= max->pri &&
+ result->spr_val <= max->spr &&
+ result->cur_val <= max->cur;
+
+ ret = result->enable;
+
+ /*
+ * HACK until we can pre-compute everything,
+ * and thus fail gracefully if LP0 watermarks
+ * are exceeded...
+ */
+ if (level == 0 && !result->enable) {
+ if (result->pri_val > max->pri)
+ DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
+ level, result->pri_val, max->pri);
+ if (result->spr_val > max->spr)
+ DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
+ level, result->spr_val, max->spr);
+ if (result->cur_val > max->cur)
+ DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
+ level, result->cur_val, max->cur);
+
+ result->pri_val = min_t(u32, result->pri_val, max->pri);
+ result->spr_val = min_t(u32, result->spr_val, max->spr);
+ result->cur_val = min_t(u32, result->cur_val, max->cur);
+ result->enable = true;
+ }
+
+ return ret;
+}
+
+static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
+ const struct intel_crtc *crtc,
+ int level,
+ struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *pristate,
+ const struct intel_plane_state *sprstate,
+ const struct intel_plane_state *curstate,
+ struct intel_wm_level *result)
+{
+ u16 pri_latency = dev_priv->display.wm.pri_latency[level];
+ u16 spr_latency = dev_priv->display.wm.spr_latency[level];
+ u16 cur_latency = dev_priv->display.wm.cur_latency[level];
+
+ /* WM1+ latency values stored in 0.5us units */
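+	/* e.g. a raw value of 4 (2 us) becomes 20 once scaled to the 0.1 us units used below */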
+ if (level > 0) {
+ pri_latency *= 5;
+ spr_latency *= 5;
+ cur_latency *= 5;
+ }
+
+ if (pristate) {
+ result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
+ pri_latency, level);
+ result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
+ }
+
+ if (sprstate)
+ result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);
+
+ if (curstate)
+ result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);
+
+ result->enable = true;
+}
+
+static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ u64 sskpd;
+
+ i915->display.wm.num_levels = 5;
+
+ sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD);
+
+ wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd);
+ if (wm[0] == 0)
+ wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd);
+ wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd);
+ wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd);
+ wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd);
+ wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
+}
+
+static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ u32 sskpd;
+
+ i915->display.wm.num_levels = 4;
+
+ sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD);
+
+ wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd);
+ wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd);
+ wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd);
+ wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
+}
+
+static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ u32 mltr;
+
+ i915->display.wm.num_levels = 3;
+
+ mltr = intel_uncore_read(&i915->uncore, MLTR_ILK);
+
+ /* ILK primary LP0 latency is 700 ns */
+ wm[0] = 7;
+ wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr);
+ wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
+}
+
+static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
+ u16 wm[5])
+{
+ /* ILK sprite LP0 latency is 1300 ns */
+ if (DISPLAY_VER(dev_priv) == 5)
+ wm[0] = 13;
+}
+
+static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
+ u16 wm[5])
+{
+ /* ILK cursor LP0 latency is 1300 ns */
+ if (DISPLAY_VER(dev_priv) == 5)
+ wm[0] = 13;
+}
+
+static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
+ u16 wm[5], u16 min)
+{
+ int level;
+
+ if (wm[0] >= min)
+ return false;
+
+ wm[0] = max(wm[0], min);
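+	/* wm[0] is in 0.1us units, WM1+ are stored in 0.5us units, hence the divide by 5 */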
+ for (level = 1; level < dev_priv->display.wm.num_levels; level++)
+ wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
+
+ return true;
+}
+
+static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
+{
+ bool changed;
+
+ /*
+ * The BIOS provided WM memory latency values are often
+ * inadequate for high resolution displays. Adjust them.
+ */
+ changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12);
+ changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12);
+ changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12);
+
+ if (!changed)
+ return;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "WM latency values increased to avoid potential underruns\n");
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+}
+
+static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
+{
+ /*
+ * On some SNB machines (Thinkpad X220 Tablet at least)
+ * LP3 usage can cause vblank interrupts to be lost.
+ * The DEIIR bit will go high but it looks like the CPU
+ * never gets interrupted.
+ *
+	 * It's not clear whether other interrupt sources could
+ * be affected or if this is somehow limited to vblank
+ * interrupts only. To play it safe we disable LP3
+ * watermarks entirely.
+ */
+ if (dev_priv->display.wm.pri_latency[3] == 0 &&
+ dev_priv->display.wm.spr_latency[3] == 0 &&
+ dev_priv->display.wm.cur_latency[3] == 0)
+ return;
+
+ dev_priv->display.wm.pri_latency[3] = 0;
+ dev_priv->display.wm.spr_latency[3] = 0;
+ dev_priv->display.wm.cur_latency[3] = 0;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "LP3 watermarks disabled due to potential for lost interrupts\n");
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+}
+
+static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
+{
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ else if (DISPLAY_VER(dev_priv) >= 6)
+ snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ else
+ ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+
+ memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency,
+ sizeof(dev_priv->display.wm.pri_latency));
+ memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency,
+ sizeof(dev_priv->display.wm.pri_latency));
+
+ intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency);
+ intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency);
+
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+
+ if (DISPLAY_VER(dev_priv) == 6) {
+ snb_wm_latency_quirk(dev_priv);
+ snb_wm_lp3_irq_quirk(dev_priv);
+ }
+}
+
+static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
+ struct intel_pipe_wm *pipe_wm)
+{
+ /* LP0 watermark maximums depend on this pipe alone */
+ const struct intel_wm_config config = {
+ .num_pipes_active = 1,
+ .sprites_enabled = pipe_wm->sprites_enabled,
+ .sprites_scaled = pipe_wm->sprites_scaled,
+ };
+ struct ilk_wm_maximums max;
+
+ /* LP0 watermarks always use 1/2 DDB partitioning */
+ ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
+
+ /* At least LP0 must be valid */
+ if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
+ drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
+ return false;
+ }
+
+ return true;
+}
+
+/* Compute new watermarks for the pipe */
+static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_pipe_wm *pipe_wm;
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
+ const struct intel_plane_state *pristate = NULL;
+ const struct intel_plane_state *sprstate = NULL;
+ const struct intel_plane_state *curstate = NULL;
+ struct ilk_wm_maximums max;
+ int level, usable_level;
+
+ pipe_wm = &crtc_state->wm.ilk.optimal;
+
+ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
+ if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+ pristate = plane_state;
+ else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
+ sprstate = plane_state;
+ else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
+ curstate = plane_state;
+ }
+
+ pipe_wm->pipe_enabled = crtc_state->hw.active;
+ pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
+ pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);
+
+ usable_level = dev_priv->display.wm.num_levels - 1;
+
+ /* ILK/SNB: LP2+ watermarks only w/o sprites */
+ if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled)
+ usable_level = 1;
+
+ /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
+ if (pipe_wm->sprites_scaled)
+ usable_level = 0;
+
+ memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
+ ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
+ pristate, sprstate, curstate, &pipe_wm->wm[0]);
+
+ if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
+ return -EINVAL;
+
+ ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
+
+ for (level = 1; level <= usable_level; level++) {
+ struct intel_wm_level *wm = &pipe_wm->wm[level];
+
+ ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
+ pristate, sprstate, curstate, wm);
+
+ /*
+ * Disable any watermark level that exceeds the
+ * register maximums since such watermarks are
+ * always invalid.
+ */
+ if (!ilk_validate_wm_level(level, &max, wm)) {
+ memset(wm, 0, sizeof(*wm));
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Build a set of 'intermediate' watermark values that satisfy both the old
+ * state and the new state. These can be programmed to the hardware
+ * immediately.
+ */
+static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate;
+ const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal;
+ int level;
+
+ /*
+ * Start with the final, target watermarks, then combine with the
+ * currently active watermarks to get values that are safe both before
+ * and after the vblank.
+ */
+ *a = new_crtc_state->wm.ilk.optimal;
+ if (!new_crtc_state->hw.active ||
+ intel_crtc_needs_modeset(new_crtc_state) ||
+ state->skip_intermediate_wm)
+ return 0;
+
+ a->pipe_enabled |= b->pipe_enabled;
+ a->sprites_enabled |= b->sprites_enabled;
+ a->sprites_scaled |= b->sprites_scaled;
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ struct intel_wm_level *a_wm = &a->wm[level];
+ const struct intel_wm_level *b_wm = &b->wm[level];
+
+ a_wm->enable &= b_wm->enable;
+ a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
+ a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
+ a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
+ a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
+ }
+
+ /*
+ * We need to make sure that these merged watermark values are
+ * actually a valid configuration themselves. If they're not,
+ * there's no safe way to transition from the old state to
+ * the new state, so we need to fail the atomic transaction.
+ */
+ if (!ilk_validate_pipe_wm(dev_priv, a))
+ return -EINVAL;
+
+ /*
+	 * If our intermediate WMs are identical to the final WMs, then we can
+ * omit the post-vblank programming; only update if it's different.
+ */
+ if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0)
+ new_crtc_state->wm.need_postvbl_update = true;
+
+ return 0;
+}
+
+/*
+ * Merge the watermarks from all active pipes for a specific level.
+ */
+static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
+ int level,
+ struct intel_wm_level *ret_wm)
+{
+ const struct intel_crtc *crtc;
+
+ ret_wm->enable = true;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
+ const struct intel_wm_level *wm = &active->wm[level];
+
+ if (!active->pipe_enabled)
+ continue;
+
+ /*
+ * The watermark values may have been used in the past,
+ * so we must maintain them in the registers for some
+ * time even if the level is now disabled.
+ */
+ if (!wm->enable)
+ ret_wm->enable = false;
+
+ ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
+ ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
+ ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
+ ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
+ }
+}
+
+/*
+ * Merge all low power watermarks for all active pipes.
+ */
+static void ilk_wm_merge(struct drm_i915_private *dev_priv,
+ const struct intel_wm_config *config,
+ const struct ilk_wm_maximums *max,
+ struct intel_pipe_wm *merged)
+{
+ int level, num_levels = dev_priv->display.wm.num_levels;
+ int last_enabled_level = num_levels - 1;
+
+ /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
+ if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
+ config->num_pipes_active > 1)
+ last_enabled_level = 0;
+
+ /* ILK: FBC WM must be disabled always */
+ merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;
+
+ /* merge each WM1+ level */
+ for (level = 1; level < num_levels; level++) {
+ struct intel_wm_level *wm = &merged->wm[level];
+
+ ilk_merge_wm_level(dev_priv, level, wm);
+
+ if (level > last_enabled_level)
+ wm->enable = false;
+ else if (!ilk_validate_wm_level(level, max, wm))
+ /* make sure all following levels get disabled */
+ last_enabled_level = level - 1;
+
+ /*
+ * The spec says it is preferred to disable
+ * FBC WMs instead of disabling a WM level.
+ */
+ if (wm->fbc_val > max->fbc) {
+ if (wm->enable)
+ merged->fbc_wm_enabled = false;
+ wm->fbc_val = 0;
+ }
+ }
+
+ /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
+ if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
+ dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) {
+ for (level = 2; level < num_levels; level++) {
+ struct intel_wm_level *wm = &merged->wm[level];
+
+ wm->enable = false;
+ }
+ }
+}
+
+static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
+{
+ /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
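+	/*
+	 * wm[4] can only be enabled on HSW/BDW (the only platforms that
+	 * read out 5 latency levels), which bumps LP2/LP3 to levels 3/4.
+	 */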
+ return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
+}
+
+/* The value we need to program into the WM_LPx latency field */
+static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
+ int level)
+{
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ return 2 * level;
+ else
+ return dev_priv->display.wm.pri_latency[level];
+}
+
+static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
+ const struct intel_pipe_wm *merged,
+ enum intel_ddb_partitioning partitioning,
+ struct ilk_wm_values *results)
+{
+ struct intel_crtc *crtc;
+ int level, wm_lp;
+
+ results->enable_fbc_wm = merged->fbc_wm_enabled;
+ results->partitioning = partitioning;
+
+ /* LP1+ register values */
+ for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
+ const struct intel_wm_level *r;
+
+ level = ilk_wm_lp_to_level(wm_lp, merged);
+
+ r = &merged->wm[level];
+
+ /*
+ * Maintain the watermark values even if the level is
+ * disabled. Doing otherwise could cause underruns.
+ */
+ results->wm_lp[wm_lp - 1] =
+ WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) |
+ WM_LP_PRIMARY(r->pri_val) |
+ WM_LP_CURSOR(r->cur_val);
+
+ if (r->enable)
+ results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE;
+
+ if (DISPLAY_VER(dev_priv) >= 8)
+ results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val);
+ else
+ results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val);
+
+ results->wm_lp_spr[wm_lp - 1] = WM_LP_SPRITE(r->spr_val);
+
+ /*
+ * Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
+ * level is disabled. Doing otherwise could cause underruns.
+ */
+ if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) {
+ drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
+ results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
+ }
+ }
+
+ /* LP0 register values */
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ enum pipe pipe = crtc->pipe;
+ const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
+ const struct intel_wm_level *r = &pipe_wm->wm[0];
+
+ if (drm_WARN_ON(&dev_priv->drm, !r->enable))
+ continue;
+
+ results->wm_pipe[pipe] =
+ WM0_PIPE_PRIMARY(r->pri_val) |
+ WM0_PIPE_SPRITE(r->spr_val) |
+ WM0_PIPE_CURSOR(r->cur_val);
+ }
+}
+
+/*
+ * Find the result with the highest level enabled. Check for enable_fbc_wm in
+ * case both are at the same level. Prefer r1 in case they're the same.
+ */
+static struct intel_pipe_wm *
+ilk_find_best_result(struct drm_i915_private *dev_priv,
+ struct intel_pipe_wm *r1,
+ struct intel_pipe_wm *r2)
+{
+ int level, level1 = 0, level2 = 0;
+
+ for (level = 1; level < dev_priv->display.wm.num_levels; level++) {
+ if (r1->wm[level].enable)
+ level1 = level;
+ if (r2->wm[level].enable)
+ level2 = level;
+ }
+
+ if (level1 == level2) {
+ if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
+ return r2;
+ else
+ return r1;
+ } else if (level1 > level2) {
+ return r1;
+ } else {
+ return r2;
+ }
+}
+
+/* dirty bits used to track which watermarks need changes */
+#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
+#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
+#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
+#define WM_DIRTY_FBC (1 << 24)
+#define WM_DIRTY_DDB (1 << 25)
+
+static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
+ const struct ilk_wm_values *old,
+ const struct ilk_wm_values *new)
+{
+ unsigned int dirty = 0;
+ enum pipe pipe;
+ int wm_lp;
+
+ for_each_pipe(dev_priv, pipe) {
+ if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
+ dirty |= WM_DIRTY_PIPE(pipe);
+ /* Must disable LP1+ watermarks too */
+ dirty |= WM_DIRTY_LP_ALL;
+ }
+ }
+
+ if (old->enable_fbc_wm != new->enable_fbc_wm) {
+ dirty |= WM_DIRTY_FBC;
+ /* Must disable LP1+ watermarks too */
+ dirty |= WM_DIRTY_LP_ALL;
+ }
+
+ if (old->partitioning != new->partitioning) {
+ dirty |= WM_DIRTY_DDB;
+ /* Must disable LP1+ watermarks too */
+ dirty |= WM_DIRTY_LP_ALL;
+ }
+
+ /* LP1+ watermarks already deemed dirty, no need to continue */
+ if (dirty & WM_DIRTY_LP_ALL)
+ return dirty;
+
+ /* Find the lowest numbered LP1+ watermark in need of an update... */
+ for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
+ if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
+ old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
+ break;
+ }
+
+ /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
+ for (; wm_lp <= 3; wm_lp++)
+ dirty |= WM_DIRTY_LP(wm_lp);
+
+ return dirty;
+}
+
+static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
+ unsigned int dirty)
+{
+ struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
+ bool changed = false;
+
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
+ previous->wm_lp[2] &= ~WM_LP_ENABLE;
+ intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
+ changed = true;
+ }
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
+ previous->wm_lp[1] &= ~WM_LP_ENABLE;
+ intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
+ changed = true;
+ }
+ if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
+ previous->wm_lp[0] &= ~WM_LP_ENABLE;
+ intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
+ changed = true;
+ }
+
+ /*
+ * Don't touch WM_LP_SPRITE_ENABLE here.
+ * Doing so could cause underruns.
+ */
+
+ return changed;
+}
+
+/*
+ * The spec says we shouldn't write when we don't need to, because every write
+ * causes WMs to be re-evaluated, expending some power.
+ */
+static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
+ struct ilk_wm_values *results)
+{
+ struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
+ unsigned int dirty;
+
+ dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
+ if (!dirty)
+ return;
+
+ _ilk_disable_lp_wm(dev_priv, dirty);
+
+ if (dirty & WM_DIRTY_PIPE(PIPE_A))
+ intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
+ if (dirty & WM_DIRTY_PIPE(PIPE_B))
+ intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
+ if (dirty & WM_DIRTY_PIPE(PIPE_C))
+ intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
+
+ if (dirty & WM_DIRTY_DDB) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
+ results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
+ WM_MISC_DATA_PARTITION_5_6);
+ else
+ intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
+ results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
+ DISP_DATA_PARTITION_5_6);
+ }
+
+ if (dirty & WM_DIRTY_FBC)
+ intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS,
+ results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
+
+ if (dirty & WM_DIRTY_LP(1) &&
+ previous->wm_lp_spr[0] != results->wm_lp_spr[0])
+ intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
+
+ if (DISPLAY_VER(dev_priv) >= 7) {
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
+ intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
+ intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
+ }
+
+ if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
+ intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
+ intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
+ intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
+
+ dev_priv->display.wm.hw = *results;
+}
+
+bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
+{
+ return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
+}
+
+static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
+ struct intel_wm_config *config)
+{
+ struct intel_crtc *crtc;
+
+ /* Compute the currently _active_ config */
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
+
+ if (!wm->pipe_enabled)
+ continue;
+
+ config->sprites_enabled |= wm->sprites_enabled;
+ config->sprites_scaled |= wm->sprites_scaled;
+ config->num_pipes_active++;
+ }
+}
+
+static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
+{
+ struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
+ struct ilk_wm_maximums max;
+ struct intel_wm_config config = {};
+ struct ilk_wm_values results = {};
+ enum intel_ddb_partitioning partitioning;
+
+ ilk_compute_wm_config(dev_priv, &config);
+
+ ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
+
+ /* 5/6 split only in single pipe config on IVB+ */
+ if (DISPLAY_VER(dev_priv) >= 7 &&
+ config.num_pipes_active == 1 && config.sprites_enabled) {
+ ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
+ ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
+
+ best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
+ } else {
+ best_lp_wm = &lp_wm_1_2;
+ }
+
+ partitioning = (best_lp_wm == &lp_wm_1_2) ?
+ INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
+
+ ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
+
+ ilk_write_wm_values(dev_priv, &results);
+}
+
+static void ilk_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+ crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
+ ilk_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+static void ilk_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!crtc_state->wm.need_postvbl_update)
+ return;
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+ crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
+ ilk_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+ struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
+ enum pipe pipe = crtc->pipe;
+
+ hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
+
+ memset(active, 0, sizeof(*active));
+
+ active->pipe_enabled = crtc->active;
+
+ if (active->pipe_enabled) {
+ u32 tmp = hw->wm_pipe[pipe];
+
+ /*
+ * For active pipes LP0 watermark is marked as
+		 * enabled, and LP1+ watermarks as disabled since
+ * we can't really reverse compute them in case
+ * multiple pipes are active.
+ */
+ active->wm[0].enable = true;
+ active->wm[0].pri_val = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, tmp);
+ active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp);
+ active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp);
+ } else {
+ int level;
+
+ /*
+ * For inactive pipes, all watermark levels
+ * should be marked as enabled but zeroed,
+ * which is what we'd compute them to.
+ */
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++)
+ active->wm[level].enable = true;
+ }
+
+ crtc->wm.active.ilk = *active;
+}
+
+static int ilk_sanitize_watermarks_add_affected(struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(state->dev, crtc) {
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = intel_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (crtc_state->hw.active) {
+ /*
+ * Preserve the inherited flag to avoid
+ * taking the full modeset path.
+ */
+ crtc_state->inherited = true;
+ }
+ }
+
+ drm_for_each_plane(plane, state->dev) {
+ struct drm_plane_state *plane_state;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+ }
+
+ return 0;
+}
+
+/*
+ * Calculate what we think the watermarks should be for the state we've read
+ * out of the hardware and then immediately program those watermarks so that
+ * we ensure the hardware settings match our internal state.
+ *
+ * We can calculate what we think WM's should be by creating a duplicate of the
+ * current state (which was constructed during hardware readout) and running it
+ * through the atomic check code to calculate new watermark values in the
+ * state object.
+ */
+void ilk_wm_sanitize(struct drm_i915_private *dev_priv)
+{
+ struct drm_atomic_state *state;
+ struct intel_atomic_state *intel_state;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret;
+ int i;
+
+ /* Only supported on platforms that use atomic watermark design */
+ if (!dev_priv->display.funcs.wm->optimize_watermarks)
+ return;
+
+ if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) >= 9))
+ return;
+
+ state = drm_atomic_state_alloc(&dev_priv->drm);
+ if (drm_WARN_ON(&dev_priv->drm, !state))
+ return;
+
+ intel_state = to_intel_atomic_state(state);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+ state->acquire_ctx = &ctx;
+
+ /*
+ * Hardware readout is the only time we don't want to calculate
+ * intermediate watermarks (since we don't trust the current
+ * watermarks).
+ */
+ if (!HAS_GMCH(dev_priv))
+ intel_state->skip_intermediate_wm = true;
+
+ ret = ilk_sanitize_watermarks_add_affected(state);
+ if (ret)
+ goto fail;
+
+ ret = intel_atomic_check(&dev_priv->drm, state);
+ if (ret)
+ goto fail;
+
+ /* Write calculated watermark values back */
+ for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+ crtc_state->wm.need_postvbl_update = true;
+ intel_optimize_watermarks(intel_state, crtc);
+
+ to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
+ }
+
+fail:
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ /*
+ * If we fail here, it means that the hardware appears to be
+ * programmed in a way that shouldn't be possible, given our
+ * understanding of watermark requirements. This might mean a
+ * mistake in the hardware readout code or a mistake in the
+ * watermark calculations for a given platform. Raise a WARN
+ * so that this is noticeable.
+ *
+ * If this actually happens, we'll have to just leave the
+ * BIOS-programmed watermarks untouched and hope for the best.
+ */
+ drm_WARN(&dev_priv->drm, ret,
+ "Could not determine valid watermarks for inherited state\n");
+
+ drm_atomic_state_put(state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+#define _FW_WM(value, plane) \
+ (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
+#define _FW_WM_VLV(value, plane) \
+ (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
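+
+/*
+ * Illustrative note: _FW_WM(value, plane) extracts a single watermark field
+ * from a DSPFW register value via token pasting, e.g. _FW_WM(tmp, CURSORB)
+ * expands to ((tmp) & DSPFW_CURSORB_MASK) >> DSPFW_CURSORB_SHIFT, while
+ * _FW_WM_VLV() does the same using the DSPFW_*_MASK_VLV variants.
+ */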
+
+static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
+ struct g4x_wm_values *wm)
+{
+ u32 tmp;
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
+ wm->sr.plane = _FW_WM(tmp, SR);
+ wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
+ wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
+ wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
+ wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
+ wm->sr.fbc = _FW_WM(tmp, FBC_SR);
+ wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
+ wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
+ wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
+ wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
+ wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
+ wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
+}
+
+static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
+ struct vlv_wm_values *wm)
+{
+ enum pipe pipe;
+ u32 tmp;
+
+ for_each_pipe(dev_priv, pipe) {
+ tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
+
+ wm->ddl[pipe].plane[PLANE_PRIMARY] =
+ (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+ wm->ddl[pipe].plane[PLANE_CURSOR] =
+ (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+ wm->ddl[pipe].plane[PLANE_SPRITE0] =
+ (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+ wm->ddl[pipe].plane[PLANE_SPRITE1] =
+ (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+ }
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
+ wm->sr.plane = _FW_WM(tmp, SR);
+ wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
+ wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
+ wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
+ wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
+ wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
+ wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
+ wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
+ wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
+ wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
+ wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
+ wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
+ wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
+ wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
+ } else {
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
+ wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
+ }
+}
+
+#undef _FW_WM
+#undef _FW_WM_VLV
+
+static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
+{
+ struct g4x_wm_values *wm = &dev_priv->display.wm.g4x;
+ struct intel_crtc *crtc;
+
+ g4x_read_wm_values(dev_priv, wm);
+
+ wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct g4x_wm_state *active = &crtc->wm.active.g4x;
+ struct g4x_pipe_wm *raw;
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
+ int level, max_level;
+
+ active->cxsr = wm->cxsr;
+ active->hpll_en = wm->hpll_en;
+ active->fbc_en = wm->fbc_en;
+
+ active->sr = wm->sr;
+ active->hpll = wm->hpll;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ active->wm.plane[plane_id] =
+ wm->pipe[pipe].plane[plane_id];
+ }
+
+ if (wm->cxsr && wm->hpll_en)
+ max_level = G4X_WM_LEVEL_HPLL;
+ else if (wm->cxsr)
+ max_level = G4X_WM_LEVEL_SR;
+ else
+ max_level = G4X_WM_LEVEL_NORMAL;
+
+ level = G4X_WM_LEVEL_NORMAL;
+ raw = &crtc_state->wm.g4x.raw[level];
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ raw->plane[plane_id] = active->wm.plane[plane_id];
+
+ level = G4X_WM_LEVEL_SR;
+ if (level > max_level)
+ goto out;
+
+ raw = &crtc_state->wm.g4x.raw[level];
+ raw->plane[PLANE_PRIMARY] = active->sr.plane;
+ raw->plane[PLANE_CURSOR] = active->sr.cursor;
+ raw->plane[PLANE_SPRITE0] = 0;
+ raw->fbc = active->sr.fbc;
+
+ level = G4X_WM_LEVEL_HPLL;
+ if (level > max_level)
+ goto out;
+
+ raw = &crtc_state->wm.g4x.raw[level];
+ raw->plane[PLANE_PRIMARY] = active->hpll.plane;
+ raw->plane[PLANE_CURSOR] = active->hpll.cursor;
+ raw->plane[PLANE_SPRITE0] = 0;
+ raw->fbc = active->hpll.fbc;
+
+ level++;
+ out:
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ g4x_raw_plane_wm_set(crtc_state, level,
+ plane_id, USHRT_MAX);
+ g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
+
+ g4x_invalidate_wms(crtc, active, level);
+
+ crtc_state->wm.g4x.optimal = *active;
+ crtc_state->wm.g4x.intermediate = *active;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
+ pipe_name(pipe),
+ wm->pipe[pipe].plane[PLANE_PRIMARY],
+ wm->pipe[pipe].plane[PLANE_CURSOR],
+ wm->pipe[pipe].plane[PLANE_SPRITE0]);
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
+ wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
+ wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
+ drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
+ str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en),
+ str_yes_no(wm->fbc_en));
+}
+
+static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
+{
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+
+ for_each_intel_plane(&dev_priv->drm, plane) {
+ struct intel_crtc *crtc =
+ intel_crtc_for_pipe(dev_priv, plane->pipe);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ enum plane_id plane_id = plane->id;
+ int level;
+
+ if (plane_state->uapi.visible)
+ continue;
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ struct g4x_pipe_wm *raw =
+ &crtc_state->wm.g4x.raw[level];
+
+ raw->plane[plane_id] = 0;
+
+ if (plane_id == PLANE_PRIMARY)
+ raw->fbc = 0;
+ }
+ }
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ int ret;
+
+ ret = _g4x_compute_pipe_wm(crtc_state);
+ drm_WARN_ON(&dev_priv->drm, ret);
+
+ crtc_state->wm.g4x.intermediate =
+ crtc_state->wm.g4x.optimal;
+ crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
+ }
+
+ g4x_program_watermarks(dev_priv);
+
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+static void g4x_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
+{
+ g4x_wm_get_hw_state(i915);
+ g4x_wm_sanitize(i915);
+}
+
+static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
+{
+ struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
+ struct intel_crtc *crtc;
+ u32 val;
+
+ vlv_read_wm_values(dev_priv, wm);
+
+ wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ wm->level = VLV_WM_LEVEL_PM2;
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ vlv_punit_get(dev_priv);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ if (val & DSP_MAXFIFO_PM5_ENABLE)
+ wm->level = VLV_WM_LEVEL_PM5;
+
+ /*
+ * If DDR DVFS is disabled in the BIOS, Punit
+ * will never ack the request. So if that happens
+ * assume we don't have to enable/disable DDR DVFS
+ * dynamically. To test that just set the REQ_ACK
+ * bit to poke the Punit, but don't change the
+ * HIGH/LOW bits so that we don't actually change
+ * the current state.
+ */
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ val |= FORCE_DDR_FREQ_REQ_ACK;
+ vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+ FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Punit not acking DDR DVFS request, "
+ "assuming DDR DVFS is disabled\n");
+ dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
+ } else {
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ if ((val & FORCE_DDR_HIGH_FREQ) == 0)
+ wm->level = VLV_WM_LEVEL_DDR_DVFS;
+ }
+
+ vlv_punit_put(dev_priv);
+ }
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct vlv_wm_state *active = &crtc->wm.active.vlv;
+ const struct vlv_fifo_state *fifo_state =
+ &crtc_state->wm.vlv.fifo_state;
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
+ int level;
+
+ vlv_get_fifo_size(crtc_state);
+
+ active->num_levels = wm->level + 1;
+ active->cxsr = wm->cxsr;
+
+ for (level = 0; level < active->num_levels; level++) {
+ struct g4x_pipe_wm *raw =
+ &crtc_state->wm.vlv.raw[level];
+
+ active->sr[level].plane = wm->sr.plane;
+ active->sr[level].cursor = wm->sr.cursor;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ active->wm[level].plane[plane_id] =
+ wm->pipe[pipe].plane[plane_id];
+
+ raw->plane[plane_id] =
+ vlv_invert_wm_value(active->wm[level].plane[plane_id],
+ fifo_state->plane[plane_id]);
+ }
+ }
+
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ vlv_raw_plane_wm_set(crtc_state, level,
+ plane_id, USHRT_MAX);
+ vlv_invalidate_wms(crtc, active, level);
+
+ crtc_state->wm.vlv.optimal = *active;
+ crtc_state->wm.vlv.intermediate = *active;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
+ pipe_name(pipe),
+ wm->pipe[pipe].plane[PLANE_PRIMARY],
+ wm->pipe[pipe].plane[PLANE_CURSOR],
+ wm->pipe[pipe].plane[PLANE_SPRITE0],
+ wm->pipe[pipe].plane[PLANE_SPRITE1]);
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
+ wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
+}
+
+static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
+{
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+
+ for_each_intel_plane(&dev_priv->drm, plane) {
+ struct intel_crtc *crtc =
+ intel_crtc_for_pipe(dev_priv, plane->pipe);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ enum plane_id plane_id = plane->id;
+ int level;
+
+ if (plane_state->uapi.visible)
+ continue;
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ struct g4x_pipe_wm *raw =
+ &crtc_state->wm.vlv.raw[level];
+
+ raw->plane[plane_id] = 0;
+ }
+ }
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ int ret;
+
+ ret = _vlv_compute_pipe_wm(crtc_state);
+ drm_WARN_ON(&dev_priv->drm, ret);
+
+ crtc_state->wm.vlv.intermediate =
+ crtc_state->wm.vlv.optimal;
+ crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
+ }
+
+ vlv_program_watermarks(dev_priv);
+
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+static void vlv_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
+{
+ vlv_wm_get_hw_state(i915);
+ vlv_wm_sanitize(i915);
+}
+
+/*
+ * FIXME should probably kill this and improve
+ * the real watermark readout/sanitization instead
+ */
+static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
+{
+ intel_uncore_rmw(&dev_priv->uncore, WM3_LP_ILK, WM_LP_ENABLE, 0);
+ intel_uncore_rmw(&dev_priv->uncore, WM2_LP_ILK, WM_LP_ENABLE, 0);
+ intel_uncore_rmw(&dev_priv->uncore, WM1_LP_ILK, WM_LP_ENABLE, 0);
+
+ /*
+ * Don't touch WM_LP_SPRITE_ENABLE here.
+ * Doing so could cause underruns.
+ */
+}
+
+static void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
+{
+ struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
+ struct intel_crtc *crtc;
+
+ ilk_init_lp_watermarks(dev_priv);
+
+ for_each_intel_crtc(&dev_priv->drm, crtc)
+ ilk_pipe_wm_get_hw_state(crtc);
+
+ hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
+ hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
+ hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
+
+ hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
+ if (DISPLAY_VER(dev_priv) >= 7) {
+ hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
+ hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
+ }
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) &
+ WM_MISC_DATA_PARTITION_5_6) ?
+ INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+ else if (IS_IVYBRIDGE(dev_priv))
+ hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) &
+ DISP_DATA_PARTITION_5_6) ?
+ INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+
+ hw->enable_fbc_wm =
+ !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+}
+
+static const struct intel_wm_funcs ilk_wm_funcs = {
+ .compute_pipe_wm = ilk_compute_pipe_wm,
+ .compute_intermediate_wm = ilk_compute_intermediate_wm,
+ .initial_watermarks = ilk_initial_watermarks,
+ .optimize_watermarks = ilk_optimize_watermarks,
+ .get_hw_state = ilk_wm_get_hw_state,
+};
+
+static const struct intel_wm_funcs vlv_wm_funcs = {
+ .compute_pipe_wm = vlv_compute_pipe_wm,
+ .compute_intermediate_wm = vlv_compute_intermediate_wm,
+ .initial_watermarks = vlv_initial_watermarks,
+ .optimize_watermarks = vlv_optimize_watermarks,
+ .atomic_update_watermarks = vlv_atomic_update_fifo,
+ .get_hw_state = vlv_wm_get_hw_state_and_sanitize,
+};
+
+static const struct intel_wm_funcs g4x_wm_funcs = {
+ .compute_pipe_wm = g4x_compute_pipe_wm,
+ .compute_intermediate_wm = g4x_compute_intermediate_wm,
+ .initial_watermarks = g4x_initial_watermarks,
+ .optimize_watermarks = g4x_optimize_watermarks,
+ .get_hw_state = g4x_wm_get_hw_state_and_sanitize,
+};
+
+static const struct intel_wm_funcs pnv_wm_funcs = {
+ .update_wm = pnv_update_wm,
+};
+
+static const struct intel_wm_funcs i965_wm_funcs = {
+ .update_wm = i965_update_wm,
+};
+
+static const struct intel_wm_funcs i9xx_wm_funcs = {
+ .update_wm = i9xx_update_wm,
+};
+
+static const struct intel_wm_funcs i845_wm_funcs = {
+ .update_wm = i845_update_wm,
+};
+
+static const struct intel_wm_funcs nop_funcs = {
+};
+
+void i9xx_wm_init(struct drm_i915_private *dev_priv)
+{
+ /* For FIFO watermark updates */
+ if (HAS_PCH_SPLIT(dev_priv)) {
+ ilk_setup_wm_latency(dev_priv);
+ dev_priv->display.funcs.wm = &ilk_wm_funcs;
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ vlv_setup_wm_latency(dev_priv);
+ dev_priv->display.funcs.wm = &vlv_wm_funcs;
+ } else if (IS_G4X(dev_priv)) {
+ g4x_setup_wm_latency(dev_priv);
+ dev_priv->display.funcs.wm = &g4x_wm_funcs;
+ } else if (IS_PINEVIEW(dev_priv)) {
+ if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
+ dev_priv->is_ddr3,
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq)) {
+ drm_info(&dev_priv->drm,
+ "failed to find known CxSR latency "
+ "(found ddr%s fsb freq %d, mem freq %d), "
+ "disabling CxSR\n",
+ (dev_priv->is_ddr3 == 1) ? "3" : "2",
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ /* Disable CxSR and never update its watermark again */
+ intel_set_memory_cxsr(dev_priv, false);
+ dev_priv->display.funcs.wm = &nop_funcs;
+ } else {
+ dev_priv->display.funcs.wm = &pnv_wm_funcs;
+ }
+ } else if (DISPLAY_VER(dev_priv) == 4) {
+ dev_priv->display.funcs.wm = &i965_wm_funcs;
+ } else if (DISPLAY_VER(dev_priv) == 3) {
+ dev_priv->display.funcs.wm = &i9xx_wm_funcs;
+ } else if (DISPLAY_VER(dev_priv) == 2) {
+ if (INTEL_NUM_PIPES(dev_priv) == 1)
+ dev_priv->display.funcs.wm = &i845_wm_funcs;
+ else
+ dev_priv->display.funcs.wm = &i9xx_wm_funcs;
+ } else {
+ drm_err(&dev_priv->drm,
+ "unexpected fall-through in %s\n", __func__);
+ dev_priv->display.funcs.wm = &nop_funcs;
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.h b/drivers/gpu/drm/i915/display/i9xx_wm.h
new file mode 100644
index 000000000000..a7875cbcd05a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __I9XX_WM_H__
+#define __I9XX_WM_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_plane_state;
+
+int ilk_wm_max_level(const struct drm_i915_private *i915);
+bool ilk_disable_lp_wm(struct drm_i915_private *i915);
+void ilk_wm_sanitize(struct drm_i915_private *i915);
+bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable);
+void i9xx_wm_init(struct drm_i915_private *i915);
+
+#endif /* __I9XX_WM_H__ */
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 468a792e6a40..4ff10b00ffbd 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -45,6 +45,7 @@
#include "intel_dsi_vbt.h"
#include "intel_panel.h"
#include "intel_vdsc.h"
+#include "intel_vdsc_regs.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
@@ -207,7 +208,7 @@ void icl_dsi_frame_update(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 tmp, mode_flags;
+ u32 mode_flags;
enum port port;
mode_flags = crtc_state->mode_flags;
@@ -224,9 +225,7 @@ void icl_dsi_frame_update(struct intel_crtc_state *crtc_state)
else
return;
- tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port));
- tmp |= DSI_FRAME_UPDATE_REQUEST;
- intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp);
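+ /*
+ * intel_de_rmw(i915, reg, clear, set) is a read-modify-write helper: it
+ * reads the register, clears the bits in 'clear', ORs in 'set' and writes
+ * the result back. With clear == 0 it only sets DSI_FRAME_UPDATE_REQUEST,
+ * matching the open-coded read/OR/write sequence it replaces.
+ */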
+ intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port), 0, DSI_FRAME_UPDATE_REQUEST);
}
static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
@@ -234,7 +233,7 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum phy phy;
- u32 tmp;
+ u32 tmp, mask, val;
int lane;
for_each_dsi_phy(phy, intel_dsi->phys) {
@@ -242,56 +241,35 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
* Program voltage swing and pre-emphasis level values as per
* table in BSPEC under DDI buffer programing
*/
+ mask = SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK;
+ val = SCALING_MODE_SEL(0x2) | TAP2_DISABLE | TAP3_DISABLE |
+ RTERM_SELECT(0x6);
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
- tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
- tmp |= SCALING_MODE_SEL(0x2);
- tmp |= TAP2_DISABLE | TAP3_DISABLE;
- tmp |= RTERM_SELECT(0x6);
+ tmp &= ~mask;
+ tmp |= val;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), mask, val);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
- tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
- tmp |= SCALING_MODE_SEL(0x2);
- tmp |= TAP2_DISABLE | TAP3_DISABLE;
- tmp |= RTERM_SELECT(0x6);
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
-
+ mask = SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
+ RCOMP_SCALAR_MASK;
+ val = SWING_SEL_UPPER(0x2) | SWING_SEL_LOWER(0x2) |
+ RCOMP_SCALAR(0x98);
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
- tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
- RCOMP_SCALAR_MASK);
- tmp |= SWING_SEL_UPPER(0x2);
- tmp |= SWING_SEL_LOWER(0x2);
- tmp |= RCOMP_SCALAR(0x98);
+ tmp &= ~mask;
+ tmp |= val;
intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy), mask, val);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy));
- tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
- RCOMP_SCALAR_MASK);
- tmp |= SWING_SEL_UPPER(0x2);
- tmp |= SWING_SEL_LOWER(0x2);
- tmp |= RCOMP_SCALAR(0x98);
- intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp);
-
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy));
- tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
- CURSOR_COEFF_MASK);
- tmp |= POST_CURSOR_1(0x0);
- tmp |= POST_CURSOR_2(0x0);
- tmp |= CURSOR_COEFF(0x3f);
- intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp);
-
- for (lane = 0; lane <= 3; lane++) {
- /* Bspec: must not use GRP register for write */
- tmp = intel_de_read(dev_priv,
- ICL_PORT_TX_DW4_LN(lane, phy));
- tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
- CURSOR_COEFF_MASK);
- tmp |= POST_CURSOR_1(0x0);
- tmp |= POST_CURSOR_2(0x0);
- tmp |= CURSOR_COEFF(0x3f);
- intel_de_write(dev_priv,
- ICL_PORT_TX_DW4_LN(lane, phy), tmp);
- }
+ mask = POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
+ CURSOR_COEFF_MASK;
+ val = POST_CURSOR_1(0x0) | POST_CURSOR_2(0x0) |
+ CURSOR_COEFF(0x3f);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), mask, val);
+
+ /* Bspec: must not use GRP register for write */
+ for (lane = 0; lane <= 3; lane++)
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy),
+ mask, val);
}
}
@@ -300,9 +278,21 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
u32 dss_ctl1;
- dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1);
+ /* FIXME: Move all DSS handling to intel_vdsc.c */
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+
+ dss_ctl1_reg = ICL_PIPE_DSS_CTL1(crtc->pipe);
+ dss_ctl2_reg = ICL_PIPE_DSS_CTL2(crtc->pipe);
+ } else {
+ dss_ctl1_reg = DSS_CTL1;
+ dss_ctl2_reg = DSS_CTL2;
+ }
+
+ dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg);
dss_ctl1 |= SPLITTER_ENABLE;
dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
@@ -310,7 +300,6 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
- u32 dss_ctl2;
u16 hactive = adjusted_mode->crtc_hdisplay;
u16 dl_buffer_depth;
@@ -323,16 +312,14 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
- dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2);
- dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
- dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
- intel_de_write(dev_priv, DSS_CTL2, dss_ctl2);
+ intel_de_rmw(dev_priv, dss_ctl2_reg, RIGHT_DL_BUF_TARGET_DEPTH_MASK,
+ RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth));
} else {
/* Interleave */
dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
}
- intel_de_write(dev_priv, DSS_CTL1, dss_ctl1);
+ intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1);
}
/* aka DSI 8X clock */
@@ -412,13 +399,10 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 tmp;
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port));
- tmp |= COMBO_PHY_MODE_DSI;
- intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port),
+ 0, COMBO_PHY_MODE_DSI);
get_dsi_io_power_domains(dev_priv, intel_dsi);
}
@@ -444,26 +428,16 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
/* Step 4b(i) set loadgen select for transmit and aux lanes */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy));
- tmp &= ~LOADGEN_SELECT;
- intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp);
- for (lane = 0; lane <= 3; lane++) {
- tmp = intel_de_read(dev_priv,
- ICL_PORT_TX_DW4_LN(lane, phy));
- tmp &= ~LOADGEN_SELECT;
- if (lane != 2)
- tmp |= LOADGEN_SELECT;
- intel_de_write(dev_priv,
- ICL_PORT_TX_DW4_LN(lane, phy), tmp);
- }
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), LOADGEN_SELECT, 0);
+ for (lane = 0; lane <= 3; lane++)
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy),
+ LOADGEN_SELECT, lane != 2 ? LOADGEN_SELECT : 0);
}
/* Step 4b(ii) set latency optimization for transmit and aux lanes */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy));
- tmp &= ~FRC_LATENCY_OPTIM_MASK;
- tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
- intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy),
+ FRC_LATENCY_OPTIM_MASK, FRC_LATENCY_OPTIM_VAL(0x5));
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
@@ -471,12 +445,8 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
/* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
if (IS_JSL_EHL(dev_priv) || (DISPLAY_VER(dev_priv) >= 12)) {
- tmp = intel_de_read(dev_priv,
- ICL_PORT_PCS_DW1_AUX(phy));
- tmp &= ~LATENCY_OPTIM_MASK;
- tmp |= LATENCY_OPTIM_VAL(0);
- intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy),
- tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy),
+ LATENCY_OPTIM_MASK, LATENCY_OPTIM_VAL(0));
tmp = intel_de_read(dev_priv,
ICL_PORT_PCS_DW1_LN(0, phy));
@@ -501,9 +471,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
tmp &= ~COMMON_KEEPER_EN;
intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp);
- tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_AUX(phy));
- tmp &= ~COMMON_KEEPER_EN;
- intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), COMMON_KEEPER_EN, 0);
}
/*
@@ -511,20 +479,15 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
* Note: loadgen select program is done
* as part of lane phy sequence configuration
*/
- for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
- tmp |= SUS_CLOCK_CONFIG;
- intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), tmp);
- }
+ for_each_dsi_phy(phy, intel_dsi->phys)
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), 0, SUS_CLOCK_CONFIG);
/* Clear training enable to change swing values */
for_each_dsi_phy(phy, intel_dsi->phys) {
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
tmp &= ~TX_TRAINING_EN;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
- tmp &= ~TX_TRAINING_EN;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), TX_TRAINING_EN, 0);
}
/* Program swing and de-emphasis */
@@ -535,9 +498,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
tmp |= TX_TRAINING_EN;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
- tmp |= TX_TRAINING_EN;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), 0, TX_TRAINING_EN);
}
}
@@ -545,13 +506,10 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- u32 tmp;
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
- tmp |= DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp);
+ intel_de_rmw(dev_priv, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE);
if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
DDI_BUF_IS_IDLE),
@@ -567,17 +525,13 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- u32 tmp;
enum port port;
enum phy phy;
/* Program T-INIT master registers */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, ICL_DSI_T_INIT_MASTER(port));
- tmp &= ~DSI_T_INIT_MASTER_MASK;
- tmp |= intel_dsi->init_count;
- intel_de_write(dev_priv, ICL_DSI_T_INIT_MASTER(port), tmp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, ICL_DSI_T_INIT_MASTER(port),
+ DSI_T_INIT_MASTER_MASK, intel_dsi->init_count);
/* Program DPHY clock lanes timings */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -608,31 +562,22 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
if (DISPLAY_VER(dev_priv) == 11) {
if (afe_clk(encoder, crtc_state) <= 800000) {
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv,
- DPHY_TA_TIMING_PARAM(port));
- tmp &= ~TA_SURE_MASK;
- tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
- intel_de_write(dev_priv,
- DPHY_TA_TIMING_PARAM(port),
- tmp);
+ intel_de_rmw(dev_priv, DPHY_TA_TIMING_PARAM(port),
+ TA_SURE_MASK,
+ TA_SURE_OVERRIDE | TA_SURE(0));
/* shadow register inside display core */
- tmp = intel_de_read(dev_priv,
- DSI_TA_TIMING_PARAM(port));
- tmp &= ~TA_SURE_MASK;
- tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
- intel_de_write(dev_priv,
- DSI_TA_TIMING_PARAM(port), tmp);
+ intel_de_rmw(dev_priv, DSI_TA_TIMING_PARAM(port),
+ TA_SURE_MASK,
+ TA_SURE_OVERRIDE | TA_SURE(0));
}
}
}
if (IS_JSL_EHL(dev_priv)) {
- for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_DPHY_CHKN(phy));
- tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP;
- intel_de_write(dev_priv, ICL_DPHY_CHKN(phy), tmp);
- }
+ for_each_dsi_phy(phy, intel_dsi->phys)
+ intel_de_rmw(dev_priv, ICL_DPHY_CHKN(phy),
+ 0, ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP);
}
}
@@ -824,11 +769,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
if (intel_dsi->dual_link) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL2(dsi_trans));
- tmp |= PORT_SYNC_MODE_ENABLE;
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL2(dsi_trans),
+ 0, PORT_SYNC_MODE_ENABLE);
}
/* configure stream splitting */
@@ -958,8 +900,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
/* program TRANS_HTOTAL register */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, HTOTAL(dsi_trans),
- (hactive - 1) | ((htotal - 1) << 16));
+ intel_de_write(dev_priv, TRANS_HTOTAL(dsi_trans),
+ HACTIVE(hactive - 1) | HTOTAL(htotal - 1));
}
/* TRANS_HSYNC register to be programmed only for video mode */
@@ -981,8 +923,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, HSYNC(dsi_trans),
- (hsync_start - 1) | ((hsync_end - 1) << 16));
+ intel_de_write(dev_priv, TRANS_HSYNC(dsi_trans),
+ HSYNC_START(hsync_start - 1) | HSYNC_END(hsync_end - 1));
}
}
@@ -995,8 +937,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
* struct drm_display_mode.
* For interlace mode: program required pixel minus 2
*/
- intel_de_write(dev_priv, VTOTAL(dsi_trans),
- (vactive - 1) | ((vtotal - 1) << 16));
+ intel_de_write(dev_priv, TRANS_VTOTAL(dsi_trans),
+ VACTIVE(vactive - 1) | VTOTAL(vtotal - 1));
}
if (vsync_end < vsync_start || vsync_end > vtotal)
@@ -1009,8 +951,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
if (is_vid_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, VSYNC(dsi_trans),
- (vsync_start - 1) | ((vsync_end - 1) << 16));
+ intel_de_write(dev_priv, TRANS_VSYNC(dsi_trans),
+ VSYNC_START(vsync_start - 1) | VSYNC_END(vsync_end - 1));
}
}
@@ -1023,17 +965,22 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
if (is_vid_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans),
+ intel_de_write(dev_priv, TRANS_VSYNCSHIFT(dsi_trans),
vsync_shift);
}
}
- /* program TRANS_VBLANK register, should be same as vtotal programmed */
+ /*
+ * program TRANS_VBLANK register, should be same as vtotal programmed
+ *
+ * FIXME get rid of these local hacks and do it right,
+ * this will not handle e.g. delayed vblank correctly.
+ */
if (DISPLAY_VER(dev_priv) >= 12) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, VBLANK(dsi_trans),
- (vactive - 1) | ((vtotal - 1) << 16));
+ intel_de_write(dev_priv, TRANS_VBLANK(dsi_trans),
+ VBLANK_START(vactive - 1) | VBLANK_END(vtotal - 1));
}
}
}
@@ -1044,17 +991,14 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
- u32 tmp;
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
- tmp |= PIPECONF_ENABLE;
- intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, TRANSCONF(dsi_trans), 0, TRANSCONF_ENABLE);
/* wait for transcoder to be enabled */
- if (intel_de_wait_for_set(dev_priv, PIPECONF(dsi_trans),
- PIPECONF_STATE_ENABLE, 10))
+ if (intel_de_wait_for_set(dev_priv, TRANSCONF(dsi_trans),
+ TRANSCONF_STATE_ENABLE, 10))
drm_err(&dev_priv->drm,
"DSI transcoder not enabled\n");
}
@@ -1067,7 +1011,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
- u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul;
+ u32 hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul;
/*
* escape clock count calculation:
@@ -1087,26 +1031,23 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
dsi_trans = dsi_port_to_transcoder(port);
/* program hst_tx_timeout */
- tmp = intel_de_read(dev_priv, DSI_HSTX_TO(dsi_trans));
- tmp &= ~HSTX_TIMEOUT_VALUE_MASK;
- tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout);
- intel_de_write(dev_priv, DSI_HSTX_TO(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, DSI_HSTX_TO(dsi_trans),
+ HSTX_TIMEOUT_VALUE_MASK,
+ HSTX_TIMEOUT_VALUE(hs_tx_timeout));
/* FIXME: DSI_CALIB_TO */
/* program lp_rx_host timeout */
- tmp = intel_de_read(dev_priv, DSI_LPRX_HOST_TO(dsi_trans));
- tmp &= ~LPRX_TIMEOUT_VALUE_MASK;
- tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout);
- intel_de_write(dev_priv, DSI_LPRX_HOST_TO(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, DSI_LPRX_HOST_TO(dsi_trans),
+ LPRX_TIMEOUT_VALUE_MASK,
+ LPRX_TIMEOUT_VALUE(lp_rx_timeout));
/* FIXME: DSI_PWAIT_TO */
/* program turn around timeout */
- tmp = intel_de_read(dev_priv, DSI_TA_TO(dsi_trans));
- tmp &= ~TA_TIMEOUT_VALUE_MASK;
- tmp |= TA_TIMEOUT_VALUE(ta_timeout);
- intel_de_write(dev_priv, DSI_TA_TO(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, DSI_TA_TO(dsi_trans),
+ TA_TIMEOUT_VALUE_MASK,
+ TA_TIMEOUT_VALUE(ta_timeout));
}
}
@@ -1310,19 +1251,16 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
- u32 tmp;
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
/* disable transcoder */
- tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
- tmp &= ~PIPECONF_ENABLE;
- intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, TRANSCONF(dsi_trans), TRANSCONF_ENABLE, 0);
/* wait for transcoder to be disabled */
- if (intel_de_wait_for_clear(dev_priv, PIPECONF(dsi_trans),
- PIPECONF_STATE_ENABLE, 50))
+ if (intel_de_wait_for_clear(dev_priv, TRANSCONF(dsi_trans),
+ TRANSCONF_STATE_ENABLE, 50))
drm_err(&dev_priv->drm,
"DSI trancoder not disabled\n");
}
@@ -1350,11 +1288,9 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
/* disable periodic update mode */
if (is_cmd_mode(intel_dsi)) {
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port));
- tmp &= ~DSI_PERIODIC_FRAME_UPDATE_ENABLE;
- intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port),
+ DSI_PERIODIC_FRAME_UPDATE_ENABLE, 0);
}
/* put dsi link in ULPS */
@@ -1374,20 +1310,16 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
/* disable ddi function */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans));
- tmp &= ~TRANS_DDI_FUNC_ENABLE;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans),
+ TRANS_DDI_FUNC_ENABLE, 0);
}
/* disable port sync mode if dual link */
if (intel_dsi->dual_link) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL2(dsi_trans));
- tmp &= ~PORT_SYNC_MODE_ENABLE;
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL2(dsi_trans),
+ PORT_SYNC_MODE_ENABLE, 0);
}
}
}
@@ -1396,14 +1328,11 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- u32 tmp;
enum port port;
gen11_dsi_ungate_clocks(encoder);
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
- tmp &= ~DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp);
+ intel_de_rmw(dev_priv, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
DDI_BUF_IS_IDLE),
@@ -1420,7 +1349,6 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 tmp;
for_each_dsi_port(port, intel_dsi->ports) {
intel_wakeref_t wakeref;
@@ -1434,11 +1362,9 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
}
/* set mode to DDI */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port));
- tmp &= ~COMBO_PHY_MODE_DSI;
- intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port),
+ COMBO_PHY_MODE_DSI, 0);
}
static void gen11_dsi_disable(struct intel_atomic_state *state,
@@ -1574,7 +1500,7 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
gen11_dsi_get_timings(encoder, pipe_config);
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
- pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
+ pipe_config->pipe_bpp = bdw_get_pipe_misc_bpp(crtc);
/* Get the details on which TE should be enabled */
if (is_cmd_mode(intel_dsi))
@@ -1754,8 +1680,8 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
goto out;
}
- tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
- ret = tmp & PIPECONF_ENABLE;
+ tmp = intel_de_read(dev_priv, TRANSCONF(dsi_trans));
+ ret = tmp & TRANSCONF_ENABLE;
}
out:
intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 1409bcfb6fd3..40de9f0f171b 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -32,18 +32,17 @@
*/
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
-#include "gt/intel_rps.h"
-
#include "i915_config.h"
#include "intel_atomic_plane.h"
#include "intel_cdclk.h"
+#include "intel_display_rps.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
-#include "intel_sprite.h"
#include "skl_scaler.h"
#include "skl_watermark.h"
@@ -363,6 +362,7 @@ void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
crtc_state->scaled_planes &= ~BIT(plane->id);
crtc_state->nv12_planes &= ~BIT(plane->id);
crtc_state->c8_planes &= ~BIT(plane->id);
+ crtc_state->async_flip_planes &= ~BIT(plane->id);
crtc_state->data_rate[plane->id] = 0;
crtc_state->data_rate_y[plane->id] = 0;
crtc_state->rel_data_rate[plane->id] = 0;
@@ -582,8 +582,10 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr
intel_plane_is_scaled(new_plane_state))))
new_crtc_state->disable_lp_wm = true;
- if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state))
+ if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state)) {
new_crtc_state->do_async_flip = true;
+ new_crtc_state->async_flip_planes |= BIT(plane->id);
+ }
return 0;
}
@@ -938,62 +940,62 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
return 0;
}
-struct wait_rps_boost {
- struct wait_queue_entry wait;
-
- struct drm_crtc *crtc;
- struct i915_request *request;
-};
-
-static int do_rps_boost(struct wait_queue_entry *_wait,
- unsigned mode, int sync, void *key)
+int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
{
- struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
- struct i915_request *rq = wait->request;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_rect *src = &plane_state->uapi.src;
+ u32 src_x, src_y, src_w, src_h, hsub, vsub;
+ bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation);
/*
- * If we missed the vblank, but the request is already running it
- * is reasonable to assume that it will complete before the next
- * vblank without our intervention, so leave RPS alone.
+ * FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS
+ * abuses hsub/vsub so we can't use them here. But as they
+ * are limited to 32bpp RGB formats we don't actually need
+ * to check anything.
*/
- if (!i915_request_started(rq))
- intel_rps_boost(rq);
- i915_request_put(rq);
-
- drm_crtc_vblank_put(wait->crtc);
-
- list_del(&wait->wait.entry);
- kfree(wait);
- return 1;
-}
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)
+ return 0;
-static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
- struct dma_fence *fence)
-{
- struct wait_rps_boost *wait;
+ /*
+ * Hardware doesn't handle subpixel coordinates.
+ * Adjust to (macro)pixel boundary, but be careful not to
+ * increase the source viewport size, because that could
+ * push the downscaling factor out of bounds.
+ */
+ src_x = src->x1 >> 16;
+ src_w = drm_rect_width(src) >> 16;
+ src_y = src->y1 >> 16;
+ src_h = drm_rect_height(src) >> 16;
- if (!dma_fence_is_i915(fence))
- return;
+ drm_rect_init(src, src_x << 16, src_y << 16,
+ src_w << 16, src_h << 16);
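+
+ /*
+ * The uapi src rectangle is in 16.16 fixed point, so the >> 16 above
+ * truncates to whole pixels (e.g. an x1 of 0x18000, i.e. 1.5, becomes
+ * src_x = 1) and drm_rect_init() rebuilds the rect on that boundary.
+ */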
- if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
- return;
+ if (fb->format->format == DRM_FORMAT_RGB565 && rotated) {
+ hsub = 2;
+ vsub = 2;
+ } else {
+ hsub = fb->format->hsub;
+ vsub = fb->format->vsub;
+ }
- if (drm_crtc_vblank_get(crtc))
- return;
+ if (rotated)
+ hsub = vsub = max(hsub, vsub);
- wait = kmalloc(sizeof(*wait), GFP_KERNEL);
- if (!wait) {
- drm_crtc_vblank_put(crtc);
- return;
+ if (src_x % hsub || src_w % hsub) {
+ drm_dbg_kms(&i915->drm, "src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n",
+ src_x, src_w, hsub, str_yes_no(rotated));
+ return -EINVAL;
}
- wait->request = to_request(dma_fence_get(fence));
- wait->crtc = crtc;
-
- wait->wait.func = do_rps_boost;
- wait->wait.flags = 0;
+ if (src_y % vsub || src_h % vsub) {
+ drm_dbg_kms(&i915->drm, "src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n",
+ src_y, src_h, vsub, str_yes_no(rotated));
+ return -EINVAL;
+ }
- add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
+ return 0;
}
/**
@@ -1086,13 +1088,13 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
dma_resv_iter_begin(&cursor, obj->base.resv,
DMA_RESV_USAGE_WRITE);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
- add_rps_boost_after_vblank(new_plane_state->hw.crtc,
- fence);
+ intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
+ fence);
}
dma_resv_iter_end(&cursor);
} else {
- add_rps_boost_after_vblank(new_plane_state->hw.crtc,
- new_plane_state->uapi.fence);
+ intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
+ new_plane_state->uapi.fence);
}
/*
@@ -1103,10 +1105,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* that are not quite steady state without resorting to forcing
* maximum clocks following a vblank miss (see do_rps_boost()).
*/
- if (!state->rps_interactive) {
- intel_rps_mark_interactive(&to_gt(dev_priv)->rps, true);
- state->rps_interactive = true;
- }
+ intel_display_rps_mark_interactive(dev_priv, state, true);
return 0;
@@ -1137,10 +1136,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
if (!obj)
return;
- if (state->rps_interactive) {
- intel_rps_mark_interactive(&to_gt(dev_priv)->rps, false);
- state->rps_interactive = false;
- }
+ intel_display_rps_mark_interactive(dev_priv, state, false);
/* Should only be called after a successful intel_prepare_plane_fb()! */
intel_plane_unpin_fb(old_plane_state);
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 74b6d3b169a7..191dad0efc8e 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -62,6 +62,7 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
struct intel_crtc_state *crtc_state,
int min_scale, int max_scale,
bool can_position);
+int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
void intel_plane_helper_add(struct intel_plane *plane);
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index a9335c856644..3d5a9bbc6fde 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -581,8 +581,7 @@ static void enable_audio_dsc_wa(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
unsigned int hblank_early_prog, samples_room;
unsigned int val;
@@ -592,32 +591,32 @@ static void enable_audio_dsc_wa(struct intel_encoder *encoder,
val = intel_de_read(i915, AUD_CONFIG_BE);
if (DISPLAY_VER(i915) == 11)
- val |= HBLANK_EARLY_ENABLE_ICL(pipe);
+ val |= HBLANK_EARLY_ENABLE_ICL(cpu_transcoder);
else if (DISPLAY_VER(i915) >= 12)
- val |= HBLANK_EARLY_ENABLE_TGL(pipe);
+ val |= HBLANK_EARLY_ENABLE_TGL(cpu_transcoder);
if (crtc_state->dsc.compression_enable &&
crtc_state->hw.adjusted_mode.hdisplay >= 3840 &&
crtc_state->hw.adjusted_mode.vdisplay >= 2160) {
/* Get hblank early enable value required */
- val &= ~HBLANK_START_COUNT_MASK(pipe);
+ val &= ~HBLANK_START_COUNT_MASK(cpu_transcoder);
hblank_early_prog = calc_hblank_early_prog(encoder, crtc_state);
if (hblank_early_prog < 32)
- val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_32);
+ val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_32);
else if (hblank_early_prog < 64)
- val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_64);
+ val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_64);
else if (hblank_early_prog < 96)
- val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_96);
+ val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_96);
else
- val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_128);
+ val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_128);
/* Get samples room value required */
- val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe);
+ val &= ~NUMBER_SAMPLES_PER_LINE_MASK(cpu_transcoder);
samples_room = calc_samples_room(crtc_state);
if (samples_room < 3)
- val |= NUMBER_SAMPLES_PER_LINE(pipe, samples_room);
+ val |= NUMBER_SAMPLES_PER_LINE(cpu_transcoder, samples_room);
else /* Program 0 i.e "All Samples available in buffer" */
- val |= NUMBER_SAMPLES_PER_LINE(pipe, 0x0);
+ val |= NUMBER_SAMPLES_PER_LINE(cpu_transcoder, 0x0);
}
intel_de_write(i915, AUD_CONFIG_BE, val);
@@ -812,9 +811,9 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
struct i915_audio_component *acomp = i915->display.audio.component;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct intel_audio_state *audio_state;
enum port port = encoder->port;
- enum pipe pipe = crtc->pipe;
if (!crtc_state->has_audio)
return;
@@ -832,7 +831,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
mutex_lock(&i915->display.audio.mutex);
- audio_state = &i915->display.audio.state[pipe];
+ audio_state = &i915->display.audio.state[cpu_transcoder];
audio_state->encoder = encoder;
BUILD_BUG_ON(sizeof(audio_state->eld) != sizeof(crtc_state->eld));
@@ -842,14 +841,14 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
- /* audio drivers expect pipe = -1 to indicate Non-MST cases */
+ /* audio drivers expect cpu_transcoder = -1 to indicate Non-MST cases */
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
- pipe = -1;
+ cpu_transcoder = -1;
acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
- (int)port, (int)pipe);
+ (int)port, (int)cpu_transcoder);
}
- intel_lpe_audio_notify(i915, pipe, port, crtc_state->eld,
+ intel_lpe_audio_notify(i915, cpu_transcoder, port, crtc_state->eld,
crtc_state->port_clock,
intel_crtc_has_dp_encoder(crtc_state));
}
@@ -871,9 +870,9 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
struct i915_audio_component *acomp = i915->display.audio.component;
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
struct intel_audio_state *audio_state;
enum port port = encoder->port;
- enum pipe pipe = crtc->pipe;
if (!old_crtc_state->has_audio)
return;
@@ -890,7 +889,7 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
mutex_lock(&i915->display.audio.mutex);
- audio_state = &i915->display.audio.state[pipe];
+ audio_state = &i915->display.audio.state[cpu_transcoder];
audio_state->encoder = NULL;
memset(audio_state->eld, 0, sizeof(audio_state->eld));
@@ -899,27 +898,26 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
- /* audio drivers expect pipe = -1 to indicate Non-MST cases */
+ /* audio drivers expect cpu_transcoder = -1 to indicate Non-MST cases */
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
- pipe = -1;
+ cpu_transcoder = -1;
acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
- (int)port, (int)pipe);
+ (int)port, (int)cpu_transcoder);
}
- intel_lpe_audio_notify(i915, pipe, port, NULL, 0, false);
+ intel_lpe_audio_notify(i915, cpu_transcoder, port, NULL, 0, false);
}
static void intel_acomp_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct intel_audio_state *audio_state;
- enum pipe pipe = crtc->pipe;
mutex_lock(&i915->display.audio.mutex);
- audio_state = &i915->display.audio.state[pipe];
+ audio_state = &i915->display.audio.state[cpu_transcoder];
if (audio_state->encoder)
memcpy(crtc_state->eld, audio_state->eld, sizeof(audio_state->eld));
@@ -985,11 +983,7 @@ void intel_audio_cdclk_change_pre(struct drm_i915_private *i915)
static void get_aud_ts_cdclk_m_n(int refclk, int cdclk, struct aud_ts_cdclk_m_n *aud_ts)
{
- if (refclk == 24000)
- aud_ts->m = 12;
- else
- aud_ts->m = 15;
-
+ aud_ts->m = 60;
aud_ts->n = cdclk * aud_ts->m / 24000;
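+ /*
+ * Worked example (illustrative): with m fixed at 60, a cdclk of 480000
+ * gives n = 480000 * 60 / 24000 = 1200.
+ */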
}
@@ -1147,27 +1141,27 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
}
/*
- * get the intel audio state according to the parameter port and pipe
- * MST & (pipe >= 0): return the audio.state[pipe].encoder],
+ * get the intel audio state according to the parameter port and cpu_transcoder
+ * MST & (cpu_transcoder >= 0): return the audio.state[cpu_transcoder].encoder,
* when port is matched
- * MST & (pipe < 0): this is invalid
- * Non-MST & (pipe >= 0): only pipe = 0 (the first device entry)
+ * MST & (cpu_transcoder < 0): this is invalid
+ * Non-MST & (cpu_transcoder >= 0): only cpu_transcoder = 0 (the first device entry)
* will get the right intel_encoder with port matched
- * Non-MST & (pipe < 0): get the right intel_encoder with port matched
+ * Non-MST & (cpu_transcoder < 0): get the right intel_encoder with port matched
*/
static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
- int port, int pipe)
+ int port, int cpu_transcoder)
{
/* MST */
- if (pipe >= 0) {
+ if (cpu_transcoder >= 0) {
struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
if (drm_WARN_ON(&i915->drm,
- pipe >= ARRAY_SIZE(i915->display.audio.state)))
+ cpu_transcoder >= ARRAY_SIZE(i915->display.audio.state)))
return NULL;
- audio_state = &i915->display.audio.state[pipe];
+ audio_state = &i915->display.audio.state[cpu_transcoder];
encoder = audio_state->encoder;
if (encoder && encoder->port == port &&
@@ -1176,14 +1170,14 @@ static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
}
/* Non-MST */
- if (pipe > 0)
+ if (cpu_transcoder > 0)
return NULL;
- for_each_pipe(i915, pipe) {
+ for_each_cpu_transcoder(i915, cpu_transcoder) {
struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
- audio_state = &i915->display.audio.state[pipe];
+ audio_state = &i915->display.audio.state[cpu_transcoder];
encoder = audio_state->encoder;
if (encoder && encoder->port == port &&
@@ -1195,7 +1189,7 @@ static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
}
static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
- int pipe, int rate)
+ int cpu_transcoder, int rate)
{
struct drm_i915_private *i915 = kdev_to_i915(kdev);
struct i915_audio_component *acomp = i915->display.audio.component;
@@ -1211,7 +1205,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
cookie = i915_audio_component_get_power(kdev);
mutex_lock(&i915->display.audio.mutex);
- audio_state = find_audio_state(i915, port, pipe);
+ audio_state = find_audio_state(i915, port, cpu_transcoder);
if (!audio_state) {
drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port));
err = -ENODEV;
@@ -1223,7 +1217,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
/* FIXME stop using the legacy crtc pointer */
crtc = to_intel_crtc(encoder->base.crtc);
- /* port must be valid now, otherwise the pipe will be invalid */
+ /* port must be valid now, otherwise the cpu_transcoder will be invalid */
acomp->aud_sample_rate[port] = rate;
/* FIXME get rid of the crtc->config stuff */
@@ -1236,7 +1230,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
}
static int i915_audio_component_get_eld(struct device *kdev, int port,
- int pipe, bool *enabled,
+ int cpu_transcoder, bool *enabled,
unsigned char *buf, int max_bytes)
{
struct drm_i915_private *i915 = kdev_to_i915(kdev);
@@ -1245,7 +1239,7 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
mutex_lock(&i915->display.audio.mutex);
- audio_state = find_audio_state(i915, port, pipe);
+ audio_state = find_audio_state(i915, port, cpu_transcoder);
if (!audio_state) {
drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port));
mutex_unlock(&i915->display.audio.mutex);
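A hedged sketch of the lookup convention documented above (illustrative only, not part of this diff; the real component hooks call find_audio_state() with display.audio.mutex held):

static void example_lookup(struct drm_i915_private *i915, int port)
{
	struct intel_audio_state *non_mst, *mst;

	/* non-MST: cpu_transcoder < 0, the port alone selects the state */
	non_mst = find_audio_state(i915, port, -1);

	/* MST: a non-negative cpu_transcoder indexes audio.state[] directly,
	 * and the port must still match that entry's encoder
	 */
	mst = find_audio_state(i915, port, 0);

	(void)non_mst;
	(void)mst;
}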
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index a4e4b7f79e4d..2e8f17c04522 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -105,7 +105,8 @@ void intel_backlight_set_pwm_level(const struct drm_connector_state *conn_state,
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", val);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] set backlight PWM = %d\n",
+ connector->base.base.id, connector->base.name, val);
panel->backlight.pwm_funcs->set(conn_state, val);
}
@@ -283,7 +284,8 @@ intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state,
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- drm_dbg_kms(&i915->drm, "set backlight level = %d\n", level);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] set backlight level = %d\n",
+ connector->base.base.id, connector->base.name, level);
panel->backlight.funcs->set(conn_state, level);
}
@@ -345,27 +347,24 @@ static void lpt_disable_backlight(const struct drm_connector_state *old_conn_sta
*/
tmp = intel_de_read(i915, BLC_PWM_CPU_CTL2);
if (tmp & BLM_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "cpu backlight was enabled, disabling\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] CPU backlight was enabled, disabling\n",
+ connector->base.base.id, connector->base.name);
intel_de_write(i915, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
}
- tmp = intel_de_read(i915, BLC_PWM_PCH_CTL1);
- intel_de_write(i915, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+ intel_de_rmw(i915, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0);
}
static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(i915, BLC_PWM_CPU_CTL2);
- intel_de_write(i915, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+ intel_de_rmw(i915, BLC_PWM_CPU_CTL2, BLM_PWM_ENABLE, 0);
- tmp = intel_de_read(i915, BLC_PWM_PCH_CTL1);
- intel_de_write(i915, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+ intel_de_rmw(i915, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0);
}
static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
@@ -376,12 +375,10 @@ static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_st
static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct drm_i915_private *i915 = to_i915(old_conn_state->connector->dev);
- u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(i915, BLC_PWM_CTL2);
- intel_de_write(i915, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
+ intel_de_rmw(i915, BLC_PWM_CTL2, BLM_PWM_ENABLE, 0);
}
static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
@@ -389,12 +386,10 @@ static void vlv_disable_backlight(const struct drm_connector_state *old_conn_sta
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe;
- u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe));
- intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
+ intel_de_rmw(i915, VLV_BLC_PWM_CTL2(pipe), BLM_PWM_ENABLE, 0);
}
static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
@@ -402,19 +397,14 @@ static void bxt_disable_backlight(const struct drm_connector_state *old_conn_sta
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
- tmp & ~BXT_BLC_PWM_ENABLE);
+ intel_de_rmw(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ BXT_BLC_PWM_ENABLE, 0);
- if (panel->backlight.controller == 1) {
- val = intel_de_read(i915, UTIL_PIN_CTL);
- val &= ~UTIL_PIN_ENABLE;
- intel_de_write(i915, UTIL_PIN_CTL, val);
- }
+ if (panel->backlight.controller == 1)
+ intel_de_rmw(i915, UTIL_PIN_CTL, UTIL_PIN_ENABLE, 0);
}
static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
@@ -422,13 +412,11 @@ static void cnp_disable_backlight(const struct drm_connector_state *old_conn_sta
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
- tmp & ~BXT_BLC_PWM_ENABLE);
+ intel_de_rmw(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ BXT_BLC_PWM_ENABLE, 0);
}
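A hedged note on the conversions above (illustrative only): they assume intel_de_rmw(i915, reg, clear, set) behaves like the open-coded read/modify/write sequences it replaces, roughly:

static u32 example_rmw(struct drm_i915_private *i915, i915_reg_t reg,
		       u32 clear, u32 set)
{
	u32 old, val;

	old = intel_de_read(i915, reg);
	val = (old & ~clear) | set;
	intel_de_write(i915, reg, val);

	return old;
}

So, for example, intel_de_rmw(i915, BLC_PWM_CPU_CTL2, BLM_PWM_ENABLE, 0) clears only the enable bit and leaves the rest of the register untouched.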
static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
@@ -458,7 +446,8 @@ void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
* another client is not activated.
*/
if (i915->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
- drm_dbg_kms(&i915->drm, "Skipping backlight disable on vga switch\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Skipping backlight disable on vga switch\n",
+ connector->base.base.id, connector->base.name);
return;
}
@@ -478,30 +467,24 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 pch_ctl1, pch_ctl2, schicken;
+ u32 pch_ctl1, pch_ctl2;
pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "pch backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PCH backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1);
}
- if (HAS_PCH_LPT(i915)) {
- schicken = intel_de_read(i915, SOUTH_CHICKEN2);
- if (panel->backlight.alternate_pwm_increment)
- schicken |= LPT_PWM_GRANULARITY;
- else
- schicken &= ~LPT_PWM_GRANULARITY;
- intel_de_write(i915, SOUTH_CHICKEN2, schicken);
- } else {
- schicken = intel_de_read(i915, SOUTH_CHICKEN1);
- if (panel->backlight.alternate_pwm_increment)
- schicken |= SPT_PWM_GRANULARITY;
- else
- schicken &= ~SPT_PWM_GRANULARITY;
- intel_de_write(i915, SOUTH_CHICKEN1, schicken);
- }
+ if (HAS_PCH_LPT(i915))
+ intel_de_rmw(i915, SOUTH_CHICKEN2, LPT_PWM_GRANULARITY,
+ panel->backlight.alternate_pwm_increment ?
+ LPT_PWM_GRANULARITY : 0);
+ else
+ intel_de_rmw(i915, SOUTH_CHICKEN1, SPT_PWM_GRANULARITY,
+ panel->backlight.alternate_pwm_increment ?
+ SPT_PWM_GRANULARITY : 0);
pch_ctl2 = panel->backlight.pwm_level_max << 16;
intel_de_write(i915, BLC_PWM_PCH_CTL2, pch_ctl2);
@@ -533,14 +516,16 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
cpu_ctl2 = intel_de_read(i915, BLC_PWM_CPU_CTL2);
if (cpu_ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "cpu backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] CPU backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
cpu_ctl2 &= ~BLM_PWM_ENABLE;
intel_de_write(i915, BLC_PWM_CPU_CTL2, cpu_ctl2);
}
pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "pch backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PCH backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1);
}
@@ -578,7 +563,8 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
ctl = intel_de_read(i915, BLC_PWM_CTL);
if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
- drm_dbg_kms(&i915->drm, "backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
intel_de_write(i915, BLC_PWM_CTL, 0);
}
@@ -618,7 +604,8 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
ctl2 = intel_de_read(i915, BLC_PWM_CTL2);
if (ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
ctl2 &= ~BLM_PWM_ENABLE;
intel_de_write(i915, BLC_PWM_CTL2, ctl2);
}
@@ -653,7 +640,8 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
ctl2 = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe));
if (ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
ctl2 &= ~BLM_PWM_ENABLE;
intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), ctl2);
}
@@ -685,7 +673,8 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
if (panel->backlight.controller == 1) {
val = intel_de_read(i915, UTIL_PIN_CTL);
if (val & UTIL_PIN_ENABLE) {
- drm_dbg_kms(&i915->drm, "util pin already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] utility pin already enabled\n",
+ connector->base.base.id, connector->base.name);
val &= ~UTIL_PIN_ENABLE;
intel_de_write(i915, UTIL_PIN_CTL, val);
}
@@ -699,7 +688,8 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
pwm_ctl = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl);
@@ -1270,6 +1260,10 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
cpu_ctl2 & ~BLM_PWM_ENABLE);
}
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
@@ -1297,6 +1291,10 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
(pch_ctl1 & BLM_PCH_PWM_ENABLE);
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
@@ -1335,6 +1333,10 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
panel->backlight.pwm_enabled = val != 0;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PWM for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
@@ -1364,6 +1366,10 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PWM for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
@@ -1392,6 +1398,10 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PWM for backlight control (on pipe %c)\n",
+ connector->base.base.id, connector->base.name, pipe_name(pipe));
+
return 0;
}
@@ -1428,6 +1438,11 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PWM for backlight control (controller=%d)\n",
+ connector->base.base.id, connector->base.name,
+ panel->backlight.controller);
+
return 0;
}
@@ -1468,7 +1483,8 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
*/
panel->backlight.controller = connector->panel.vbt.backlight.controller;
if (!cnp_backlight_controller_is_valid(i915, panel->backlight.controller)) {
- drm_dbg_kms(&i915->drm, "Invalid backlight controller %d, assuming 0\n",
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Invalid backlight controller %d, assuming 0\n",
+ connector->base.base.id, connector->base.name,
panel->backlight.controller);
panel->backlight.controller = 0;
}
@@ -1490,6 +1506,11 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control (controller=%d)\n",
+ connector->base.base.id, connector->base.name,
+ panel->backlight.controller);
+
return 0;
}
@@ -1511,8 +1532,8 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
}
if (IS_ERR(panel->backlight.pwm)) {
- drm_err(&i915->drm, "Failed to get the %s PWM chip\n",
- desc);
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to get the %s PWM chip\n",
+ connector->base.base.id, connector->base.name, desc);
panel->backlight.pwm = NULL;
return -ENODEV;
}
@@ -1529,7 +1550,8 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
level = intel_backlight_invert_pwm_level(connector, level);
panel->backlight.pwm_enabled = true;
- drm_dbg_kms(&i915->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n",
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PWM already enabled at freq %ld, VBT freq %d, level %d\n",
+ connector->base.base.id, connector->base.name,
NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
get_vbt_pwm_freq(connector), level);
} else {
@@ -1538,8 +1560,10 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
NSEC_PER_SEC / get_vbt_pwm_freq(connector);
}
- drm_info(&i915->drm, "Using %s PWM for LCD backlight control\n",
- desc);
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using %s PWM for backlight control\n",
+ connector->base.base.id, connector->base.name, desc);
+
return 0;
}
@@ -1582,8 +1606,9 @@ static void intel_pwm_disable_backlight(const struct drm_connector_state *conn_s
static int intel_pwm_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
struct intel_panel *panel = &connector->panel;
- int ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+ int ret;
+ ret = panel->backlight.pwm_funcs->setup(connector, pipe);
if (ret < 0)
return ret;
@@ -1623,10 +1648,12 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
if (!connector->panel.vbt.backlight.present) {
if (intel_has_quirk(i915, QUIRK_BACKLIGHT_PRESENT)) {
drm_dbg_kms(&i915->drm,
- "no backlight present per VBT, but present per quirk\n");
+ "[CONNECTOR:%d:%s] no backlight present per VBT, but present per quirk\n",
+ connector->base.base.id, connector->base.name);
} else {
drm_dbg_kms(&i915->drm,
- "no backlight present per VBT\n");
+ "[CONNECTOR:%d:%s] no backlight present per VBT\n",
+ connector->base.base.id, connector->base.name);
return 0;
}
}
@@ -1642,16 +1669,16 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
if (ret) {
drm_dbg_kms(&i915->drm,
- "failed to setup backlight for connector %s\n",
- connector->base.name);
+ "[CONNECTOR:%d:%s] failed to setup backlight\n",
+ connector->base.base.id, connector->base.name);
return ret;
}
panel->backlight.present = true;
drm_dbg_kms(&i915->drm,
- "Connector %s backlight initialized, %s, brightness %u/%u\n",
- connector->base.name,
+ "[CONNECTOR:%d:%s] backlight initialized, %s, brightness %u/%u\n",
+ connector->base.base.id, connector->base.name,
str_enabled_disabled(panel->backlight.enabled),
panel->backlight.level, panel->backlight.max);
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 04b846440de6..75e69dffc5e9 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -141,8 +141,8 @@ struct bdb_block_entry {
};
static const void *
-find_section(struct drm_i915_private *i915,
- enum bdb_block_id section_id)
+bdb_find_section(struct drm_i915_private *i915,
+ enum bdb_block_id section_id)
{
struct bdb_block_entry *entry;
@@ -201,7 +201,7 @@ static size_t lfp_data_min_size(struct drm_i915_private *i915)
const struct bdb_lvds_lfp_data_ptrs *ptrs;
size_t size;
- ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
+ ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
if (!ptrs)
return 0;
@@ -630,7 +630,7 @@ static int vbt_get_panel_type(struct drm_i915_private *i915,
{
const struct bdb_lvds_options *lvds_options;
- lvds_options = find_section(i915, BDB_LVDS_OPTIONS);
+ lvds_options = bdb_find_section(i915, BDB_LVDS_OPTIONS);
if (!lvds_options)
return -1;
@@ -671,11 +671,11 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
dump_pnp_id(i915, edid_id, "EDID");
- ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
+ ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
if (!ptrs)
return -1;
- data = find_section(i915, BDB_LVDS_LFP_DATA);
+ data = bdb_find_section(i915, BDB_LVDS_LFP_DATA);
if (!data)
return -1;
@@ -791,7 +791,7 @@ parse_panel_options(struct drm_i915_private *i915,
int panel_type = panel->vbt.panel_type;
int drrs_mode;
- lvds_options = find_section(i915, BDB_LVDS_OPTIONS);
+ lvds_options = bdb_find_section(i915, BDB_LVDS_OPTIONS);
if (!lvds_options)
return;
@@ -881,11 +881,11 @@ parse_lfp_data(struct drm_i915_private *i915,
const struct lvds_pnp_id *pnp_id;
int panel_type = panel->vbt.panel_type;
- ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
+ ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
if (!ptrs)
return;
- data = find_section(i915, BDB_LVDS_LFP_DATA);
+ data = bdb_find_section(i915, BDB_LVDS_LFP_DATA);
if (!data)
return;
@@ -932,7 +932,7 @@ parse_generic_dtd(struct drm_i915_private *i915,
if (i915->display.vbt.version < 229)
return;
- generic_dtd = find_section(i915, BDB_GENERIC_DTD);
+ generic_dtd = bdb_find_section(i915, BDB_GENERIC_DTD);
if (!generic_dtd)
return;
@@ -1011,7 +1011,7 @@ parse_lfp_backlight(struct drm_i915_private *i915,
int panel_type = panel->vbt.panel_type;
u16 level;
- backlight_data = find_section(i915, BDB_LVDS_BACKLIGHT);
+ backlight_data = bdb_find_section(i915, BDB_LVDS_BACKLIGHT);
if (!backlight_data)
return;
@@ -1084,6 +1084,12 @@ parse_lfp_backlight(struct drm_i915_private *i915,
panel->vbt.backlight.min_brightness = entry->min_brightness;
}
+ if (i915->display.vbt.version >= 239)
+ panel->vbt.backlight.hdr_dpcd_refresh_timeout =
+ DIV_ROUND_UP(backlight_data->hdr_dpcd_refresh_timeout[panel_type], 100);
+ else
+ panel->vbt.backlight.hdr_dpcd_refresh_timeout = 30;
+
drm_dbg_kms(&i915->drm,
"VBT backlight PWM modulation frequency %u Hz, "
"active %s, min brightness %u, level %u, controller %u\n",
@@ -1113,14 +1119,14 @@ parse_sdvo_panel_data(struct drm_i915_private *i915,
if (index == -1) {
const struct bdb_sdvo_lvds_options *sdvo_lvds_options;
- sdvo_lvds_options = find_section(i915, BDB_SDVO_LVDS_OPTIONS);
+ sdvo_lvds_options = bdb_find_section(i915, BDB_SDVO_LVDS_OPTIONS);
if (!sdvo_lvds_options)
return;
index = sdvo_lvds_options->panel_type;
}
- dtds = find_section(i915, BDB_SDVO_PANEL_DTDS);
+ dtds = bdb_find_section(i915, BDB_SDVO_PANEL_DTDS);
if (!dtds)
return;
@@ -1156,7 +1162,7 @@ parse_general_features(struct drm_i915_private *i915)
{
const struct bdb_general_features *general;
- general = find_section(i915, BDB_GENERAL_FEATURES);
+ general = bdb_find_section(i915, BDB_GENERAL_FEATURES);
if (!general)
return;
@@ -1202,9 +1208,7 @@ child_device_ptr(const struct bdb_general_definitions *defs, int i)
static void
parse_sdvo_device_mapping(struct drm_i915_private *i915)
{
- struct sdvo_device_mapping *mapping;
const struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
int count = 0;
/*
@@ -1217,7 +1221,8 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915)
}
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
+ struct sdvo_device_mapping *mapping;
if (child->slave_addr != SLAVE_ADDR1 &&
child->slave_addr != SLAVE_ADDR2) {
@@ -1280,7 +1285,7 @@ parse_driver_features(struct drm_i915_private *i915)
{
const struct bdb_driver_features *driver;
- driver = find_section(i915, BDB_DRIVER_FEATURES);
+ driver = bdb_find_section(i915, BDB_DRIVER_FEATURES);
if (!driver)
return;
@@ -1317,7 +1322,7 @@ parse_panel_driver_features(struct drm_i915_private *i915,
{
const struct bdb_driver_features *driver;
- driver = find_section(i915, BDB_DRIVER_FEATURES);
+ driver = bdb_find_section(i915, BDB_DRIVER_FEATURES);
if (!driver)
return;
@@ -1357,7 +1362,7 @@ parse_power_conservation_features(struct drm_i915_private *i915,
if (i915->display.vbt.version < 228)
return;
- power = find_section(i915, BDB_LFP_POWER);
+ power = bdb_find_section(i915, BDB_LFP_POWER);
if (!power)
return;
@@ -1397,7 +1402,7 @@ parse_edp(struct drm_i915_private *i915,
const struct edp_fast_link_params *edp_link_params;
int panel_type = panel->vbt.panel_type;
- edp = find_section(i915, BDB_EDP);
+ edp = bdb_find_section(i915, BDB_EDP);
if (!edp)
return;
@@ -1527,7 +1532,7 @@ parse_psr(struct drm_i915_private *i915,
const struct psr_table *psr_table;
int panel_type = panel->vbt.panel_type;
- psr = find_section(i915, BDB_PSR);
+ psr = bdb_find_section(i915, BDB_PSR);
if (!psr) {
drm_dbg_kms(&i915->drm, "No PSR BDB found.\n");
return;
@@ -1688,7 +1693,7 @@ parse_mipi_config(struct drm_i915_private *i915,
/* Parse #52 for panel index used from panel_type already
* parsed
*/
- start = find_section(i915, BDB_MIPI_CONFIG);
+ start = bdb_find_section(i915, BDB_MIPI_CONFIG);
if (!start) {
drm_dbg_kms(&i915->drm, "No MIPI config BDB found");
return;
@@ -2000,7 +2005,7 @@ parse_mipi_sequence(struct drm_i915_private *i915,
if (panel->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
return;
- sequence = find_section(i915, BDB_MIPI_SEQUENCE);
+ sequence = bdb_find_section(i915, BDB_MIPI_SEQUENCE);
if (!sequence) {
drm_dbg_kms(&i915->drm,
"No MIPI Sequence found, parsing complete\n");
@@ -2075,14 +2080,13 @@ parse_compression_parameters(struct drm_i915_private *i915)
{
const struct bdb_compression_parameters *params;
struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
u16 block_size;
int index;
if (i915->display.vbt.version < 198)
return;
- params = find_section(i915, BDB_COMPRESSION_PARAMETERS);
+ params = bdb_find_section(i915, BDB_COMPRESSION_PARAMETERS);
if (params) {
/* Sanity checks */
if (params->entry_size != sizeof(params->data[0])) {
@@ -2100,7 +2104,7 @@ parse_compression_parameters(struct drm_i915_private *i915)
}
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
if (!child->compression_enable)
continue;
@@ -2226,14 +2230,14 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin)
{
- const struct intel_bios_encoder_data *devdata;
enum port port;
if (!ddc_pin)
return PORT_NONE;
for_each_port(port) {
- devdata = i915->display.vbt.ports[port];
+ const struct intel_bios_encoder_data *devdata =
+ i915->display.vbt.ports[port];
if (devdata && ddc_pin == devdata->child.ddc_pin)
return port;
@@ -2292,14 +2296,14 @@ static void sanitize_ddc_pin(struct intel_bios_encoder_data *devdata,
static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
{
- const struct intel_bios_encoder_data *devdata;
enum port port;
if (!aux_ch)
return PORT_NONE;
for_each_port(port) {
- devdata = i915->display.vbt.ports[port];
+ const struct intel_bios_encoder_data *devdata =
+ i915->display.vbt.ports[port];
if (devdata && aux_ch == devdata->child.aux_channel)
return port;
@@ -2522,7 +2526,7 @@ static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate)
}
}
-static int _intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
+int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 216)
return 0;
@@ -2533,7 +2537,7 @@ static int _intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *de
return parse_bdb_216_dp_max_link_rate(devdata->child.dp_max_link_rate);
}
-static int _intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata)
+int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 244)
return 0;
@@ -2587,7 +2591,7 @@ intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdata)
return devdata->child.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
}
-static bool
+bool
intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata)
{
return intel_bios_encoder_supports_dp(devdata) &&
@@ -2600,7 +2604,14 @@ intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata)
return devdata->child.device_type & DEVICE_TYPE_MIPI_OUTPUT;
}
-static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
+bool
+intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata && HAS_LSPCON(devdata->i915) && devdata->child.lspcon;
+}
+
+/* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */
+int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 158)
return -1;
@@ -2608,7 +2619,7 @@ static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *de
return devdata->child.hdmi_level_shifter_value;
}
-static int _intel_bios_max_tmds_clock(const struct intel_bios_encoder_data *devdata)
+int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 204)
return 0;
@@ -2666,37 +2677,37 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
drm_dbg_kms(&i915->drm,
"Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d DSI:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, is_dsi,
- HAS_LSPCON(i915) && child->lspcon,
+ intel_bios_encoder_is_lspcon(devdata),
supports_typec_usb, supports_tbt,
devdata->dsc != NULL);
- hdmi_level_shift = _intel_bios_hdmi_level_shift(devdata);
+ hdmi_level_shift = intel_bios_hdmi_level_shift(devdata);
if (hdmi_level_shift >= 0) {
drm_dbg_kms(&i915->drm,
"Port %c VBT HDMI level shift: %d\n",
port_name(port), hdmi_level_shift);
}
- max_tmds_clock = _intel_bios_max_tmds_clock(devdata);
+ max_tmds_clock = intel_bios_hdmi_max_tmds_clock(devdata);
if (max_tmds_clock)
drm_dbg_kms(&i915->drm,
"Port %c VBT HDMI max TMDS clock: %d kHz\n",
port_name(port), max_tmds_clock);
/* I_boost config for SKL and above */
- dp_boost_level = intel_bios_encoder_dp_boost_level(devdata);
+ dp_boost_level = intel_bios_dp_boost_level(devdata);
if (dp_boost_level)
drm_dbg_kms(&i915->drm,
"Port %c VBT (e)DP boost level: %d\n",
port_name(port), dp_boost_level);
- hdmi_boost_level = intel_bios_encoder_hdmi_boost_level(devdata);
+ hdmi_boost_level = intel_bios_hdmi_boost_level(devdata);
if (hdmi_boost_level)
drm_dbg_kms(&i915->drm,
"Port %c VBT HDMI boost level: %d\n",
port_name(port), hdmi_boost_level);
- dp_max_link_rate = _intel_bios_dp_max_link_rate(devdata);
+ dp_max_link_rate = intel_bios_dp_max_link_rate(devdata);
if (dp_max_link_rate)
drm_dbg_kms(&i915->drm,
"Port %c VBT DP max link rate: %d\n",
@@ -2781,7 +2792,7 @@ parse_general_definitions(struct drm_i915_private *i915)
u16 block_size;
int bus_pin;
- defs = find_section(i915, BDB_GENERAL_DEFINITIONS);
+ defs = bdb_find_section(i915, BDB_GENERAL_DEFINITIONS);
if (!defs) {
drm_dbg_kms(&i915->drm,
"No general definition block is found, no devices defined.\n");
@@ -2811,7 +2822,7 @@ parse_general_definitions(struct drm_i915_private *i915)
expected_size = 37;
} else if (i915->display.vbt.version <= 215) {
expected_size = 38;
- } else if (i915->display.vbt.version <= 237) {
+ } else if (i915->display.vbt.version <= 250) {
expected_size = 39;
} else {
expected_size = sizeof(*child);
@@ -3306,7 +3317,6 @@ void intel_bios_fini_panel(struct intel_panel *panel)
bool intel_bios_is_tv_present(struct drm_i915_private *i915)
{
const struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
if (!i915->display.vbt.int_tv_support)
return false;
@@ -3315,7 +3325,7 @@ bool intel_bios_is_tv_present(struct drm_i915_private *i915)
return true;
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
/*
* If the device type is not TV, continue.
@@ -3349,13 +3359,12 @@ bool intel_bios_is_tv_present(struct drm_i915_private *i915)
bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
{
const struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
if (list_empty(&i915->display.vbt.display_devices))
return true;
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
/* If the device type is not LFP, continue.
* We have to check both the new identifiers as well as the
@@ -3397,25 +3406,22 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
*/
bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port)
{
+ const struct intel_bios_encoder_data *devdata;
+
if (WARN_ON(!has_ddi_port_info(i915)))
return true;
- return i915->display.vbt.ports[port];
-}
+ if (!is_port_valid(i915, port))
+ return false;
-/**
- * intel_bios_is_port_edp - is the device in given port eDP
- * @i915: i915 device instance
- * @port: port to check
- *
- * Return true if the device in %port is eDP.
- */
-bool intel_bios_is_port_edp(struct drm_i915_private *i915, enum port port)
-{
- const struct intel_bios_encoder_data *devdata =
- intel_bios_encoder_data_lookup(i915, port);
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ const struct child_device_config *child = &devdata->child;
+
+ if (dvo_port_to_port(i915, child->dvo_port) == port)
+ return true;
+ }
- return devdata && intel_bios_encoder_supports_edp(devdata);
+ return false;
}
static bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_data *devdata)
@@ -3457,17 +3463,14 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *i915,
enum port *port)
{
const struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
- u8 dvo_port;
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
+ u8 dvo_port = child->dvo_port;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
- dvo_port = child->dvo_port;
-
if (dsi_dvo_port_to_port(i915, dvo_port) == PORT_NONE) {
drm_dbg_kms(&i915->drm,
"VBT has unsupported DSI port %c\n",
@@ -3554,10 +3557,9 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
@@ -3576,73 +3578,10 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
return false;
}
-/**
- * intel_bios_is_port_hpd_inverted - is HPD inverted for %port
- * @i915: i915 device instance
- * @port: port to check
- *
- * Return true if HPD should be inverted for %port.
- */
-bool
-intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
- enum port port)
+static enum aux_ch map_aux_ch(struct drm_i915_private *i915, u8 aux_channel)
{
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
-
- if (drm_WARN_ON_ONCE(&i915->drm,
- !IS_GEMINILAKE(i915) && !IS_BROXTON(i915)))
- return false;
-
- return devdata && devdata->child.hpd_invert;
-}
-
-/**
- * intel_bios_is_lspcon_present - if LSPCON is attached on %port
- * @i915: i915 device instance
- * @port: port to check
- *
- * Return true if LSPCON is present on this port
- */
-bool
-intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
- enum port port)
-{
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
-
- return HAS_LSPCON(i915) && devdata && devdata->child.lspcon;
-}
-
-/**
- * intel_bios_is_lane_reversal_needed - if lane reversal needed on port
- * @i915: i915 device instance
- * @port: port to check
- *
- * Return true if port requires lane reversal
- */
-bool
-intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
- enum port port)
-{
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
-
- return devdata && devdata->child.lane_reversal;
-}
-
-enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
- enum port port)
-{
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
enum aux_ch aux_ch;
- if (!devdata || !devdata->child.aux_channel) {
- aux_ch = (enum aux_ch)port;
-
- drm_dbg_kms(&i915->drm,
- "using AUX %c for port %c (platform default)\n",
- aux_ch_name(aux_ch), port_name(port));
- return aux_ch;
- }
-
/*
* RKL/DG1 VBT uses PHY based mapping. Combo PHYs A,B,C,D
* map to DDI A,B,TC1,TC2 respectively.
@@ -3650,7 +3589,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
* ADL-S VBT uses PHY based mapping. Combo PHYs A,B,C,D,E
* map to DDI A,TC1,TC2,TC3,TC4 respectively.
*/
- switch (devdata->child.aux_channel) {
+ switch (aux_channel) {
case DP_AUX_A:
aux_ch = AUX_CH_A;
break;
@@ -3711,35 +3650,23 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
aux_ch = AUX_CH_I;
break;
default:
- MISSING_CASE(devdata->child.aux_channel);
+ MISSING_CASE(aux_channel);
aux_ch = AUX_CH_A;
break;
}
- drm_dbg_kms(&i915->drm, "using AUX %c for port %c (VBT)\n",
- aux_ch_name(aux_ch), port_name(port));
-
return aux_ch;
}
-int intel_bios_max_tmds_clock(struct intel_encoder *encoder)
+enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
+ if (!devdata || !devdata->child.aux_channel)
+ return AUX_CH_NONE;
- return _intel_bios_max_tmds_clock(devdata);
+ return map_aux_ch(devdata->i915, devdata->child.aux_channel);
}
-/* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */
-int intel_bios_hdmi_level_shift(struct intel_encoder *encoder)
-{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
-
- return _intel_bios_hdmi_level_shift(devdata);
-}
-
-int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata)
+int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
return 0;
@@ -3747,7 +3674,7 @@ int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devd
return translate_iboost(devdata->child.dp_iboost_level);
}
-int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata)
+int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
return 0;
@@ -3755,31 +3682,12 @@ int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *de
return translate_iboost(devdata->child.hdmi_iboost_level);
}
-int intel_bios_dp_max_link_rate(struct intel_encoder *encoder)
-{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
-
- return _intel_bios_dp_max_link_rate(devdata);
-}
-
-int intel_bios_dp_max_lane_count(struct intel_encoder *encoder)
-{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
-
- return _intel_bios_dp_max_lane_count(devdata);
-}
-
-int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder)
+int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
-
if (!devdata || !devdata->child.ddc_pin)
return 0;
- return map_ddc_pin(i915, devdata->child.ddc_pin);
+ return map_ddc_pin(devdata->i915, devdata->child.ddc_pin);
}
bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata)
@@ -3792,6 +3700,16 @@ bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devda
return devdata->i915->display.vbt.version >= 209 && devdata->child.tbt;
}
+bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata && devdata->child.lane_reversal;
+}
+
+bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata && devdata->child.hpd_invert;
+}
+
const struct intel_bios_encoder_data *
intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port)
{
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index d221f784aa88..8a0730c9b48c 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -38,6 +38,7 @@ struct intel_bios_encoder_data;
struct intel_crtc_state;
struct intel_encoder;
struct intel_panel;
+enum aux_ch;
enum port;
enum intel_backlight_type {
@@ -248,21 +249,9 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
-bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
- enum port port);
-bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
- enum port port);
-bool intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
- enum port port);
-enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int dsc_max_bpc);
-int intel_bios_max_tmds_clock(struct intel_encoder *encoder);
-int intel_bios_hdmi_level_shift(struct intel_encoder *encoder);
-int intel_bios_dp_max_link_rate(struct intel_encoder *encoder);
-int intel_bios_dp_max_lane_count(struct intel_encoder *encoder);
-int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder);
bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port);
bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port);
@@ -272,9 +261,19 @@ intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port);
bool intel_bios_encoder_supports_dvi(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_hdmi(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata);
-int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata);
-int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata);
+enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata);
+int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata);
+int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata);
+int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata);
+int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata);
+int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata);
+int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata);
+int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata);
#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 1c236f02b380..202321ffbe2a 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -119,6 +119,32 @@ static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
+static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
+{
+ unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
+ unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ u16 qgv_points = 0, psf_points = 0;
+
+ /*
+ * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects
+ * it with failure if we try masking any unadvertised points.
+ * So need to operate only with those returned from PCode.
+ */
+ if (num_qgv_points > 0)
+ qgv_points = GENMASK(num_qgv_points - 1, 0);
+
+ if (num_psf_gv_points > 0)
+ psf_points = GENMASK(num_psf_gv_points - 1, 0);
+
+ return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
+}
+
+static bool is_sagv_enabled(struct drm_i915_private *i915, u16 points_mask)
+{
+ return !is_power_of_2(~points_mask & icl_qgv_points_mask(i915) &
+ ICL_PCODE_REQ_QGV_PT_MASK);
+}
+
int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
u32 points_mask)
{
@@ -136,6 +162,9 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
return ret;
}
+ dev_priv->display.sagv.status = is_sagv_enabled(dev_priv, points_mask) ?
+ I915_SAGV_ENABLED : I915_SAGV_DISABLED;
+
return 0;
}
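A hedged worked example for the mask logic above (illustrative only; the helper below is invented): with num_qgv_points = 4 and num_psf_gv_points = 2, icl_qgv_points_mask() covers QGV bits 0b1111 and PSF bits 0b11, and is_sagv_enabled() treats SAGV as disabled exactly when the restriction leaves a single QGV point selectable (a power-of-two remainder):

static bool example_sagv(u16 remaining_qgv_bits)
{
	/* 0b1110 -> three points left -> SAGV considered enabled,
	 * 0b0001 -> one point left    -> SAGV considered disabled
	 */
	return !is_power_of_2(remaining_qgv_bits);
}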
@@ -965,26 +994,6 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
return 0;
}
-static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
-{
- unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
- u16 qgv_points = 0, psf_points = 0;
-
- /*
- * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects
- * it with failure if we try masking any unadvertised points.
- * So need to operate only with those returned from PCode.
- */
- if (num_qgv_points > 0)
- qgv_points = GENMASK(num_qgv_points - 1, 0);
-
- if (num_psf_gv_points > 0)
- psf_points = GENMASK(num_psf_gv_points - 1, 0);
-
- return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
-}
-
static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 7e16b655c833..084a483f9776 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1329,6 +1329,30 @@ static const struct intel_cdclk_vals adlp_cdclk_table[] = {
{}
};
+static const struct intel_cdclk_vals rplu_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 172800, .divider = 3, .ratio = 27 },
+ { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
+ { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
+ { .refclk = 19200, .cdclk = 480000, .divider = 2, .ratio = 50 },
+ { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
+ { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
+
+ { .refclk = 24000, .cdclk = 176000, .divider = 3, .ratio = 22 },
+ { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
+ { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
+ { .refclk = 24000, .cdclk = 480000, .divider = 2, .ratio = 40 },
+ { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
+ { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
+
+ { .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
+ { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
+ { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25 },
+ { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
+ { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+ {}
+};
+
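A hedged consistency note on the new table (illustrative only; the checker below is invented): every row appears to follow cdclk == refclk * ratio / divider, i.e. the PLL VCO is refclk * ratio and the divider then produces cdclk, e.g. 19200 * 27 / 3 == 172800 and 38400 * 34 / 2 == 652800.

static bool example_row_is_consistent(const struct intel_cdclk_vals *v)
{
	return v->cdclk == v->refclk * v->ratio / v->divider;
}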
static const struct intel_cdclk_vals dg2_cdclk_table[] = {
{ .refclk = 38400, .cdclk = 163200, .divider = 2, .ratio = 34, .waveform = 0x8888 },
{ .refclk = 38400, .cdclk = 204000, .divider = 2, .ratio = 34, .waveform = 0x9248 },
@@ -1801,6 +1825,13 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91
return true;
}
+static bool pll_enable_wa_needed(struct drm_i915_private *dev_priv)
+{
+ return ((IS_DG2(dev_priv) || IS_METEORLAKE(dev_priv)) &&
+ dev_priv->display.cdclk.hw.vco > 0 &&
+ HAS_CDCLK_SQUASH(dev_priv));
+}
+
static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
@@ -1815,9 +1846,13 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
!cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) {
if (dev_priv->display.cdclk.hw.vco != vco)
adlp_cdclk_pll_crawl(dev_priv, vco);
- } else if (DISPLAY_VER(dev_priv) >= 11)
+ } else if (DISPLAY_VER(dev_priv) >= 11) {
+ /* wa_15010685871: dg2, mtl */
+ if (pll_enable_wa_needed(dev_priv))
+ dg2_cdclk_squash_program(dev_priv, 0);
+
icl_cdclk_pll_update(dev_priv, vco);
- else
+ } else
bxt_cdclk_pll_update(dev_priv, vco);
waveform = cdclk_squash_waveform(dev_priv, cdclk);
@@ -3353,6 +3388,8 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
/* Wa_22011320316:adl-p[a0] */
if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
dev_priv->display.cdclk.table = adlp_a_step_cdclk_table;
+ else if (IS_ADLP_RPLU(dev_priv))
+ dev_priv->display.cdclk.table = rplu_cdclk_table;
else
dev_priv->display.cdclk.table = adlp_cdclk_table;
} else if (IS_ROCKETLAKE(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 8d97c299e657..36aac88143ac 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -47,6 +47,11 @@ struct intel_color_funcs {
*/
void (*color_commit_arm)(const struct intel_crtc_state *crtc_state);
/*
+ * Perform any extra tasks needed after all the
+ * double buffered registers have been latched.
+ */
+ void (*color_post_update)(const struct intel_crtc_state *crtc_state);
+ /*
* Load LUTs (and other single buffered color management
* registers). Will (hopefully) be called during the vblank
* following the latching of any double buffered registers
@@ -257,7 +262,7 @@ static bool ilk_limited_range(const struct intel_crtc_state *crtc_state)
if (DISPLAY_VER(i915) >= 11)
return false;
- /* pre-hsw have PIPECONF_COLOR_RANGE_SELECT */
+ /* pre-hsw have TRANSCONF_COLOR_RANGE_SELECT */
if (DISPLAY_VER(i915) < 7 || IS_IVYBRIDGE(i915))
return false;
@@ -614,9 +619,33 @@ static void ilk_lut_12p4_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
{
+ /*
+ * Despite Wa_1406463849, ICL no longer suffers from the SKL
+ * DC5/PSR CSC black screen issue (see skl_color_commit_noarm()).
+ * Possibly due to the extra sticky CSC arming
+ * (see icl_color_post_update()).
+ *
+ * On TGL+ all CSC arming issues have been properly fixed.
+ */
icl_load_csc_matrix(crtc_state);
}
+static void skl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * Possibly related to display WA #1184, SKL CSC loses the latched
+ * CSC coeff/offset register values if the CSC registers are disarmed
+ * between DC5 exit and PSR exit. This will cause the plane(s) to
+ * output all black (until CSC_MODE is rearmed and properly latched).
+ * Once PSR exit (and proper register latching) has occurred the
+ * danger is over. Thus when PSR is enabled the CSC coeff/offset
+ * register programming will be performed from skl_color_commit_arm()
+ * which is called after PSR exit.
+ */
+ if (!crtc_state->has_psr)
+ ilk_load_csc_matrix(crtc_state);
+}
+
static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state)
{
ilk_load_csc_matrix(crtc_state);
@@ -624,7 +653,7 @@ static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state)
static void i9xx_color_commit_arm(const struct intel_crtc_state *crtc_state)
{
- /* update PIPECONF GAMMA_MODE */
+ /* update TRANSCONF GAMMA_MODE */
i9xx_set_pipeconf(crtc_state);
}
@@ -633,7 +662,7 @@ static void ilk_color_commit_arm(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- /* update PIPECONF GAMMA_MODE */
+ /* update TRANSCONF GAMMA_MODE */
ilk_set_pipeconf(crtc_state);
intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe),
@@ -659,6 +688,9 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
u32 val = 0;
+ if (crtc_state->has_psr)
+ ilk_load_csc_matrix(crtc_state);
+
/*
* We don't (yet) allow userspace to control the pipe background color,
* so force it to black, but apply pipe gamma and CSC appropriately
@@ -677,6 +709,47 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
crtc_state->csc_mode);
}
+static void icl_color_commit_arm(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ /*
+ * We don't (yet) allow userspace to control the pipe background color,
+ * so force it to black.
+ */
+ intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), 0);
+
+ intel_de_write(i915, GAMMA_MODE(crtc->pipe),
+ crtc_state->gamma_mode);
+
+ intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe),
+ crtc_state->csc_mode);
+}
+
+static void icl_color_post_update(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ /*
+ * Despite Wa_1406463849, ICL CSC is no longer disarmed by
+ * coeff/offset register *writes*. Instead, once CSC_MODE
+ * is armed it stays armed, even after it has been latched.
+ * Afterwards the coeff/offset registers become effectively
+ * self-arming. That self-arming must be disabled before the
+ * next icl_color_commit_noarm() tries to write the next set
+ * of coeff/offset registers. Fortunately register *reads*
+ * do still disarm the CSC. Naturally this must not be done
+ * until the previously written CSC registers have actually
+ * been latched.
+ *
+ * TGL+ no longer need this workaround.
+ */
+ intel_de_read_fw(i915, PIPE_CSC_PREOFF_HI(crtc->pipe));
+}
+
static struct drm_property_blob *
create_linear_lut(struct drm_i915_private *i915, int lut_size)
{
@@ -1256,8 +1329,11 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
break;
}
- if (crtc_state->dsb)
- intel_dsb_commit(crtc_state->dsb);
+ if (crtc_state->dsb) {
+ intel_dsb_finish(crtc_state->dsb);
+ intel_dsb_commit(crtc_state->dsb, false);
+ intel_dsb_wait(crtc_state->dsb);
+ }
}
static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color)
@@ -1373,6 +1449,14 @@ void intel_color_commit_arm(const struct intel_crtc_state *crtc_state)
i915->display.funcs.color->color_commit_arm(crtc_state);
}
+void intel_color_post_update(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (i915->display.funcs.color->color_post_update)
+ i915->display.funcs.color->color_post_update(crtc_state);
+}
+
void intel_color_prepare_commit(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1380,6 +1464,9 @@ void intel_color_prepare_commit(struct intel_crtc_state *crtc_state)
/* FIXME DSB has issues loading LUTs, disable it for now */
return;
+ if (!crtc_state->pre_csc_lut && !crtc_state->post_csc_lut)
+ return;
+
crtc_state->dsb = intel_dsb_prepare(crtc, 1024);
}
@@ -1500,6 +1587,8 @@ intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state)
return PTR_ERR(plane_state);
new_crtc_state->update_planes |= BIT(plane->id);
+ new_crtc_state->async_flip_planes = 0;
+ new_crtc_state->do_async_flip = false;
/* plane control register changes blocked by CxSR */
if (HAS_GMCH(i915))
@@ -3064,10 +3153,20 @@ static const struct intel_color_funcs i9xx_color_funcs = {
.lut_equal = i9xx_lut_equal,
};
+static const struct intel_color_funcs tgl_color_funcs = {
+ .color_check = icl_color_check,
+ .color_commit_noarm = icl_color_commit_noarm,
+ .color_commit_arm = icl_color_commit_arm,
+ .load_luts = icl_load_luts,
+ .read_luts = icl_read_luts,
+ .lut_equal = icl_lut_equal,
+};
+
static const struct intel_color_funcs icl_color_funcs = {
.color_check = icl_color_check,
.color_commit_noarm = icl_color_commit_noarm,
- .color_commit_arm = skl_color_commit_arm,
+ .color_commit_arm = icl_color_commit_arm,
+ .color_post_update = icl_color_post_update,
.load_luts = icl_load_luts,
.read_luts = icl_read_luts,
.lut_equal = icl_lut_equal,
@@ -3075,7 +3174,7 @@ static const struct intel_color_funcs icl_color_funcs = {
static const struct intel_color_funcs glk_color_funcs = {
.color_check = glk_color_check,
- .color_commit_noarm = ilk_color_commit_noarm,
+ .color_commit_noarm = skl_color_commit_noarm,
.color_commit_arm = skl_color_commit_arm,
.load_luts = glk_load_luts,
.read_luts = glk_read_luts,
@@ -3084,7 +3183,7 @@ static const struct intel_color_funcs glk_color_funcs = {
static const struct intel_color_funcs skl_color_funcs = {
.color_check = ivb_color_check,
- .color_commit_noarm = ilk_color_commit_noarm,
+ .color_commit_noarm = skl_color_commit_noarm,
.color_commit_arm = skl_color_commit_arm,
.load_luts = bdw_load_luts,
.read_luts = bdw_read_luts,
@@ -3180,7 +3279,9 @@ void intel_color_init_hooks(struct drm_i915_private *i915)
else
i915->display.funcs.color = &i9xx_color_funcs;
} else {
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(i915) >= 12)
+ i915->display.funcs.color = &tgl_color_funcs;
+ else if (DISPLAY_VER(i915) == 11)
i915->display.funcs.color = &icl_color_funcs;
else if (DISPLAY_VER(i915) == 10)
i915->display.funcs.color = &glk_color_funcs;
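
A minimal sketch, assuming the call site from the intel_display.c hunks later in this diff, of how the new color_post_update hook is reached; icl_color_post_update() then performs the dummy CSC register read that disarms the self-armed coeff/offset registers:

	static void example_post_plane_update(struct intel_atomic_state *state,
					      struct intel_crtc *crtc)
	{
		const struct intel_crtc_state *new_crtc_state =
			intel_atomic_get_new_crtc_state(state, crtc);

		/* dispatches to icl_color_post_update() on ICL/EHL/JSL,
		 * and is a no-op where no hook is installed */
		if (intel_crtc_needs_color_update(new_crtc_state))
			intel_color_post_update(new_crtc_state);
	}
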
diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
index d620b5b1e2a6..8002492be709 100644
--- a/drivers/gpu/drm/i915/display/intel_color.h
+++ b/drivers/gpu/drm/i915/display/intel_color.h
@@ -21,6 +21,7 @@ void intel_color_prepare_commit(struct intel_crtc_state *crtc_state);
void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state);
void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state);
void intel_color_commit_arm(const struct intel_crtc_state *crtc_state);
+void intel_color_post_update(const struct intel_crtc_state *crtc_state);
void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
void intel_color_get_config(struct intel_crtc_state *crtc_state);
bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 8b870b2dd4f9..922a6d87b553 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -78,14 +78,11 @@ static void icl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
enum phy phy)
{
const struct icl_procmon *procmon;
- u32 val;
procmon = icl_get_procmon_ref_values(dev_priv, phy);
- val = intel_de_read(dev_priv, ICL_PORT_COMP_DW1(phy));
- val &= ~((0xff << 16) | 0xff);
- val |= procmon->dw1;
- intel_de_write(dev_priv, ICL_PORT_COMP_DW1(phy), val);
+ intel_de_rmw(dev_priv, ICL_PORT_COMP_DW1(phy),
+ (0xff << 16) | 0xff, procmon->dw1);
intel_de_write(dev_priv, ICL_PORT_COMP_DW9(phy), procmon->dw9);
intel_de_write(dev_priv, ICL_PORT_COMP_DW10(phy), procmon->dw10);
@@ -236,8 +233,7 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2);
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN(0, phy),
- DCC_MODE_SELECT_MASK,
- DCC_MODE_SELECT_CONTINUOSLY);
+ DCC_MODE_SELECT_MASK, RUN_DCC_ONCE);
}
ret &= icl_verify_procmon_ref_values(dev_priv, phy);
@@ -267,7 +263,6 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
int lane_count, bool lane_reversal)
{
u8 lane_mask;
- u32 val;
if (is_dsi) {
drm_WARN_ON(&dev_priv->drm, lane_reversal);
@@ -308,10 +303,8 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
}
}
- val = intel_de_read(dev_priv, ICL_PORT_CL_DW10(phy));
- val &= ~PWR_DOWN_LN_MASK;
- val |= lane_mask;
- intel_de_write(dev_priv, ICL_PORT_CL_DW10(phy), val);
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy),
+ PWR_DOWN_LN_MASK, lane_mask);
}
static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
@@ -360,25 +353,19 @@ skip_phy_misc:
val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
val &= ~DCC_MODE_SELECT_MASK;
- val |= DCC_MODE_SELECT_CONTINUOSLY;
+ val |= RUN_DCC_ONCE;
intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
}
icl_set_procmon_ref_values(dev_priv, phy);
- if (phy_is_master(dev_priv, phy)) {
- val = intel_de_read(dev_priv, ICL_PORT_COMP_DW8(phy));
- val |= IREFGEN;
- intel_de_write(dev_priv, ICL_PORT_COMP_DW8(phy), val);
- }
-
- val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy));
- val |= COMP_INIT;
- intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val);
+ if (phy_is_master(dev_priv, phy))
+ intel_de_rmw(dev_priv, ICL_PORT_COMP_DW8(phy),
+ 0, IREFGEN);
- val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
- val |= CL_POWER_DOWN_ENABLE;
- intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val);
+ intel_de_rmw(dev_priv, ICL_PORT_COMP_DW0(phy), 0, COMP_INIT);
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy),
+ 0, CL_POWER_DOWN_ENABLE);
}
}
@@ -387,8 +374,6 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
enum phy phy;
for_each_combo_phy_reverse(dev_priv, phy) {
- u32 val;
-
if (phy == PHY_A &&
!icl_combo_phy_verify_state(dev_priv, phy)) {
if (IS_TIGERLAKE(dev_priv) || IS_DG1(dev_priv)) {
@@ -410,14 +395,11 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
if (!has_phy_misc(dev_priv, phy))
goto skip_phy_misc;
- val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));
- val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- intel_de_write(dev_priv, ICL_PHY_MISC(phy), val);
+ intel_de_rmw(dev_priv, ICL_PHY_MISC(phy), 0,
+ ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN);
skip_phy_misc:
- val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy));
- val &= ~COMP_INIT;
- intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val);
+ intel_de_rmw(dev_priv, ICL_PORT_COMP_DW0(phy), COMP_INIT, 0);
}
}
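
Most of the combo PHY changes above are mechanical conversions of open-coded read/modify/write sequences to intel_de_rmw(). A rough sketch of the helper's behaviour as assumed here (not its actual definition):

	/* intel_de_rmw(i915, reg, clear, set): clear the 'clear' bits, OR in 'set',
	 * write the result back, and return the old value.  So:
	 *   intel_de_rmw(i915, reg, 0, IREFGEN)    - set a bit
	 *   intel_de_rmw(i915, reg, COMP_INIT, 0)  - clear a bit
	 *   intel_de_rmw(i915, reg, MASK, value)   - replace a whole field
	 */
	static u32 example_rmw(struct drm_i915_private *i915, i915_reg_t reg,
			       u32 clear, u32 set)
	{
		u32 old = intel_de_read(i915, reg);

		intel_de_write(i915, reg, (old & ~clear) | set);

		return old;
	}
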
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
index 2ed65193ca19..b0983edccf3f 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
@@ -90,8 +90,8 @@
#define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy))
#define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy))
#define ICL_PORT_PCS_DW1_LN(ln, phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, ln, phy))
-#define DCC_MODE_SELECT_MASK (0x3 << 20)
-#define DCC_MODE_SELECT_CONTINUOSLY (0x3 << 20)
+#define DCC_MODE_SELECT_MASK REG_GENMASK(21, 20)
+#define RUN_DCC_ONCE REG_FIELD_PREP(DCC_MODE_SELECT_MASK, 0)
#define COMMON_KEEPER_EN (1 << 26)
#define LATENCY_OPTIM_MASK (0x3 << 2)
#define LATENCY_OPTIM_VAL(x) ((x) << 2)
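
The header change above trades hand-rolled shift/mask constants for the REG_GENMASK()/REG_FIELD_PREP() helpers, and the users below switch to REG_FIELD_GET() for readout. A short sketch of how the three combine; the EXAMPLE_* names are illustrative only, the macros themselves come from i915_reg_defs.h:

	/* bits 21:20, matching the hunk above */
	#define EXAMPLE_DCC_MODE_SELECT_MASK	REG_GENMASK(21, 20)
	#define EXAMPLE_RUN_DCC_ONCE		REG_FIELD_PREP(EXAMPLE_DCC_MODE_SELECT_MASK, 0)

	static u32 example_replace_and_read_field(u32 val)
	{
		/* replace the field while preserving the other bits ... */
		val &= ~EXAMPLE_DCC_MODE_SELECT_MASK;
		val |= EXAMPLE_RUN_DCC_ONCE;

		/* ... and extract it again without open-coded shifts */
		return REG_FIELD_GET(EXAMPLE_DCC_MODE_SELECT_MASK, val);
	}
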
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 7267ffc7f539..8f2ebead0826 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -260,7 +260,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state,
ilk_pfit_disable(old_crtc_state);
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
pch_post_disable_crt(state, encoder, old_crtc_state, old_conn_state);
@@ -300,7 +300,7 @@ static void hsw_pre_enable_crt(struct intel_atomic_state *state,
hsw_fdi_link_train(encoder, crtc_state);
- intel_ddi_enable_pipe_clock(encoder, crtc_state);
+ intel_ddi_enable_transcoder_clock(encoder, crtc_state);
}
static void hsw_enable_crt(struct intel_atomic_state *state,
@@ -678,10 +678,11 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
}
static enum drm_connector_status
-intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
+intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
{
struct drm_device *dev = crt->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ enum transcoder cpu_transcoder = (enum transcoder)pipe;
u32 save_bclrpat;
u32 save_vtotal;
u32 vtotal, vactive;
@@ -693,25 +694,25 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
drm_dbg_kms(&dev_priv->drm, "starting load-detect on CRT\n");
- save_bclrpat = intel_de_read(dev_priv, BCLRPAT(pipe));
- save_vtotal = intel_de_read(dev_priv, VTOTAL(pipe));
- vblank = intel_de_read(dev_priv, VBLANK(pipe));
+ save_bclrpat = intel_de_read(dev_priv, BCLRPAT(cpu_transcoder));
+ save_vtotal = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder));
+ vblank = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder));
- vtotal = ((save_vtotal >> 16) & 0xfff) + 1;
- vactive = (save_vtotal & 0x7ff) + 1;
+ vtotal = REG_FIELD_GET(VTOTAL_MASK, save_vtotal) + 1;
+ vactive = REG_FIELD_GET(VACTIVE_MASK, save_vtotal) + 1;
- vblank_start = (vblank & 0xfff) + 1;
- vblank_end = ((vblank >> 16) & 0xfff) + 1;
+ vblank_start = REG_FIELD_GET(VBLANK_START_MASK, vblank) + 1;
+ vblank_end = REG_FIELD_GET(VBLANK_END_MASK, vblank) + 1;
/* Set the border color to purple. */
- intel_de_write(dev_priv, BCLRPAT(pipe), 0x500050);
+ intel_de_write(dev_priv, BCLRPAT(cpu_transcoder), 0x500050);
if (DISPLAY_VER(dev_priv) != 2) {
- u32 pipeconf = intel_de_read(dev_priv, PIPECONF(pipe));
+ u32 transconf = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
- intel_de_write(dev_priv, PIPECONF(pipe),
- pipeconf | PIPECONF_FORCE_BORDER);
- intel_de_posting_read(dev_priv, PIPECONF(pipe));
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder),
+ transconf | TRANSCONF_FORCE_BORDER);
+ intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
/* Wait for next Vblank to substitute
* border color for Color info */
intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));
@@ -720,7 +721,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
connector_status_connected :
connector_status_disconnected;
- intel_de_write(dev_priv, PIPECONF(pipe), pipeconf);
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), transconf);
} else {
bool restore_vblank = false;
int count, detect;
@@ -730,12 +731,13 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
* Yes, this will flicker
*/
if (vblank_start <= vactive && vblank_end >= vtotal) {
- u32 vsync = intel_de_read(dev_priv, VSYNC(pipe));
- u32 vsync_start = (vsync & 0xffff) + 1;
+ u32 vsync = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder));
+ u32 vsync_start = REG_FIELD_GET(VSYNC_START_MASK, vsync) + 1;
vblank_start = vsync_start;
- intel_de_write(dev_priv, VBLANK(pipe),
- (vblank_start - 1) | ((vblank_end - 1) << 16));
+ intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
+ VBLANK_START(vblank_start - 1) |
+ VBLANK_END(vblank_end - 1));
restore_vblank = true;
}
/* sample in the vertical border, selecting the larger one */
@@ -766,7 +768,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
/* restore vblank if necessary */
if (restore_vblank)
- intel_de_write(dev_priv, VBLANK(pipe), vblank);
+ intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), vblank);
/*
* If more than 3/4 of the scanline detected a monitor,
* then it is assumed to be present. This works even on i830,
@@ -779,7 +781,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
}
/* Restore previous settings */
- intel_de_write(dev_priv, BCLRPAT(pipe), save_bclrpat);
+ intel_de_write(dev_priv, BCLRPAT(cpu_transcoder), save_bclrpat);
return status;
}
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 82be0fbe9934..ed45a6934854 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -25,6 +25,7 @@
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_dsi.h"
+#include "intel_fifo_underrun.h"
#include "intel_pipe_crc.h"
#include "intel_psr.h"
#include "intel_sprite.h"
@@ -211,7 +212,7 @@ static void intel_crtc_destroy(struct drm_crtc *_crtc)
static int intel_crtc_late_register(struct drm_crtc *crtc)
{
- intel_crtc_debugfs_add(crtc);
+ intel_crtc_debugfs_add(to_intel_crtc(crtc));
return 0;
}
@@ -314,6 +315,8 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
}
crtc->plane_ids_mask |= BIT(primary->id);
+ intel_init_fifo_underrun_reporting(dev_priv, crtc, false);
+
for_each_sprite(dev_priv, pipe, sprite) {
struct intel_plane *plane;
@@ -683,6 +686,14 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
*/
intel_vrr_send_push(new_crtc_state);
+ /*
+ * Seamless M/N update may need to update frame timings.
+ *
+ * FIXME Should be synchronized with the start of vblank somehow...
+ */
+ if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
+ intel_crtc_update_active_timings(new_crtc_state);
+
local_irq_enable();
if (intel_vgpu_active(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 2422d6ef5777..766633566fd6 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -14,14 +14,16 @@
static void intel_dump_crtc_timings(struct drm_i915_private *i915,
const struct drm_display_mode *mode)
{
- drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
- "type: 0x%x flags: 0x%x\n",
+ drm_dbg_kms(&i915->drm, "crtc timings: clock=%d, "
+ "hd=%d hb=%d-%d hs=%d-%d ht=%d, "
+ "vd=%d vb=%d-%d vs=%d-%d vt=%d, "
+ "flags=0x%x\n",
mode->crtc_clock,
- mode->crtc_hdisplay, mode->crtc_hsync_start,
- mode->crtc_hsync_end, mode->crtc_htotal,
- mode->crtc_vdisplay, mode->crtc_vsync_start,
- mode->crtc_vsync_end, mode->crtc_vtotal,
- mode->type, mode->flags);
+ mode->crtc_hdisplay, mode->crtc_hblank_start, mode->crtc_hblank_end,
+ mode->crtc_hsync_start, mode->crtc_hsync_end, mode->crtc_htotal,
+ mode->crtc_vdisplay, mode->crtc_vblank_start, mode->crtc_vblank_end,
+ mode->crtc_vsync_start, mode->crtc_vsync_end, mode->crtc_vtotal,
+ mode->flags);
}
static void
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index d190fa0d393b..31bef0427377 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -21,7 +21,6 @@
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
#include "intel_psr.h"
-#include "intel_sprite.h"
#include "skl_watermark.h"
/* Cursor formats */
@@ -532,9 +531,10 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane,
skl_write_cursor_wm(plane, crtc_state);
if (plane_state)
- intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0);
+ intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state,
+ plane_state);
else
- intel_psr2_disable_plane_sel_fetch(plane, crtc_state);
+ intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state);
if (plane->cursor.base != base ||
plane->cursor.size != fbc_ctl ||
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 254559abedfb..73240cf78c8b 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -47,6 +47,7 @@
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dp.h"
+#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
@@ -64,9 +65,9 @@
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_snps_phy.h"
-#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
+#include "intel_vdsc_regs.h"
#include "intel_vrr.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
@@ -89,7 +90,7 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder,
{
int level;
- level = intel_bios_hdmi_level_shift(encoder);
+ level = intel_bios_hdmi_level_shift(encoder->devdata);
if (level < 0)
level = trans->hdmi_default_entry;
@@ -126,7 +127,7 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
/* If we're boosting the current, set bit 31 of trans1 */
if (has_iboost(dev_priv) &&
- intel_bios_encoder_dp_boost_level(encoder->devdata))
+ intel_bios_dp_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
for (i = 0; i < n_entries; i++) {
@@ -158,7 +159,7 @@ static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
/* If we're boosting the current, set bit 31 of trans1 */
if (has_iboost(dev_priv) &&
- intel_bios_encoder_hdmi_boost_level(encoder->devdata))
+ intel_bios_hdmi_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
/* Entry 9 is for HDMI: */
@@ -644,19 +645,14 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
struct drm_i915_private *dev_priv = to_i915(dev);
intel_wakeref_t wakeref;
int ret = 0;
- u32 tmp;
wakeref = intel_display_power_get_if_enabled(dev_priv,
intel_encoder->power_domain);
if (drm_WARN_ON(dev, !wakeref))
return -ENXIO;
- tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
- if (enable)
- tmp |= hdcp_mask;
- else
- tmp &= ~hdcp_mask;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), tmp);
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder),
+ hdcp_mask, enable ? hdcp_mask : 0);
intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
return ret;
}
@@ -948,8 +944,8 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
main_link_aux_power_domain_get(dig_port, crtc_state);
}
-void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -957,33 +953,34 @@ void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
u32 val;
- if (cpu_transcoder != TRANSCODER_EDP) {
- if (DISPLAY_VER(dev_priv) >= 13)
- val = TGL_TRANS_CLK_SEL_PORT(phy);
- else if (DISPLAY_VER(dev_priv) >= 12)
- val = TGL_TRANS_CLK_SEL_PORT(encoder->port);
- else
- val = TRANS_CLK_SEL_PORT(encoder->port);
+ if (cpu_transcoder == TRANSCODER_EDP)
+ return;
- intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
- }
+ if (DISPLAY_VER(dev_priv) >= 13)
+ val = TGL_TRANS_CLK_SEL_PORT(phy);
+ else if (DISPLAY_VER(dev_priv) >= 12)
+ val = TGL_TRANS_CLK_SEL_PORT(encoder->port);
+ else
+ val = TRANS_CLK_SEL_PORT(encoder->port);
+
+ intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
}
-void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
+void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 val;
- if (cpu_transcoder != TRANSCODER_EDP) {
- if (DISPLAY_VER(dev_priv) >= 12)
- intel_de_write(dev_priv,
- TRANS_CLK_SEL(cpu_transcoder),
- TGL_TRANS_CLK_SEL_DISABLED);
- else
- intel_de_write(dev_priv,
- TRANS_CLK_SEL(cpu_transcoder),
- TRANS_CLK_SEL_DISABLED);
- }
+ if (cpu_transcoder == TRANSCODER_EDP)
+ return;
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ val = TGL_TRANS_CLK_SEL_DISABLED;
+ else
+ val = TRANS_CLK_SEL_DISABLED;
+
+ intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
}
static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
@@ -1009,9 +1006,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
u8 iboost;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- iboost = intel_bios_encoder_hdmi_boost_level(encoder->devdata);
+ iboost = intel_bios_hdmi_boost_level(encoder->devdata);
else
- iboost = intel_bios_encoder_dp_boost_level(encoder->devdata);
+ iboost = intel_bios_dp_boost_level(encoder->devdata);
if (iboost == 0) {
const struct intel_ddi_buf_trans *trans;
@@ -2200,15 +2197,13 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp;
- u32 val;
if (!crtc_state->fec_enable)
return;
intel_dp = enc_to_intel_dp(encoder);
- val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
- val |= DP_TP_CTL_FEC_ENABLE;
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
+ intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ 0, DP_TP_CTL_FEC_ENABLE);
}
static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
@@ -2216,15 +2211,13 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp;
- u32 val;
if (!crtc_state->fec_enable)
return;
intel_dp = enc_to_intel_dp(encoder);
- val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
- val &= ~DP_TP_CTL_FEC_ENABLE;
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
+ intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ DP_TP_CTL_FEC_ENABLE, 0);
intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
}
@@ -2387,7 +2380,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
* 7.a Configure Transcoder Clock Select to direct the Port clock to the
* Transcoder.
*/
- intel_ddi_enable_pipe_clock(encoder, crtc_state);
+ intel_ddi_enable_transcoder_clock(encoder, crtc_state);
if (HAS_DP20(dev_priv))
intel_ddi_config_transcoder_dp2(encoder, crtc_state);
@@ -2514,7 +2507,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_fec(encoder, crtc_state);
if (!is_mst)
- intel_ddi_enable_pipe_clock(encoder, crtc_state);
+ intel_ddi_enable_transcoder_clock(encoder, crtc_state);
intel_dsc_dp_pps_write(encoder, crtc_state);
}
@@ -2526,6 +2519,10 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ if (HAS_DP20(dev_priv))
+ intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder),
+ crtc_state);
+
if (DISPLAY_VER(dev_priv) >= 12)
tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
else
@@ -2556,7 +2553,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
icl_program_mg_dp_mode(dig_port, crtc_state);
- intel_ddi_enable_pipe_clock(encoder, crtc_state);
+ intel_ddi_enable_transcoder_clock(encoder, crtc_state);
dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
@@ -2622,12 +2619,9 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
wait = true;
}
- if (intel_crtc_has_dp_encoder(crtc_state)) {
- val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
- val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
- val |= DP_TP_CTL_LINK_TRAIN_PAT1;
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
- }
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ DP_TP_CTL_ENABLE, 0);
/* Disable FEC in DP Sink */
intel_ddi_disable_fec_state(encoder, crtc_state);
@@ -2660,19 +2654,14 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) >= 12) {
if (is_mst) {
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
- u32 val;
- val = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(cpu_transcoder));
- val &= ~(TGL_TRANS_DDI_PORT_MASK |
- TRANS_DDI_MODE_SELECT_MASK);
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL(cpu_transcoder),
- val);
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder),
+ TGL_TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK,
+ 0);
}
} else {
if (!is_mst)
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
}
intel_disable_ddi_buf(encoder, old_crtc_state);
@@ -2683,7 +2672,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
* transcoder"
*/
if (DISPLAY_VER(dev_priv) >= 12)
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_pps_vdd_on(intel_dp);
intel_pps_off(intel_dp);
@@ -2709,12 +2698,12 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
old_crtc_state, old_conn_state);
if (DISPLAY_VER(dev_priv) < 12)
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_disable_ddi_buf(encoder, old_crtc_state);
if (DISPLAY_VER(dev_priv) >= 12)
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_display_power_put(dev_priv,
dig_port->ddi_io_power_domain,
@@ -3153,8 +3142,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
wait = true;
}
- dp_tp_ctl &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
- dp_tp_ctl |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ dp_tp_ctl &= ~DP_TP_CTL_ENABLE;
intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
@@ -3222,12 +3210,9 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- u32 val;
- val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
- val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
- val |= DP_TP_CTL_LINK_TRAIN_IDLE;
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
+ intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ DP_TP_CTL_LINK_TRAIN_MASK, DP_TP_CTL_LINK_TRAIN_IDLE);
/*
* Until TGL on PORT_A we can have only eDP in SST mode. There the only
@@ -3559,6 +3544,37 @@ static void icl_ddi_combo_get_config(struct intel_encoder *encoder,
intel_ddi_get_config(encoder, crtc_state);
}
+static bool icl_ddi_tc_pll_is_tbt(const struct intel_shared_dpll *pll)
+{
+ return pll->info->id == DPLL_ID_ICL_TBTPLL;
+}
+
+static enum icl_port_dpll_id
+icl_ddi_tc_port_pll_type(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return ICL_PORT_DPLL_DEFAULT;
+
+ if (icl_ddi_tc_pll_is_tbt(pll))
+ return ICL_PORT_DPLL_DEFAULT;
+ else
+ return ICL_PORT_DPLL_MG_PHY;
+}
+
+enum icl_port_dpll_id
+intel_ddi_port_pll_type(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ if (!encoder->port_pll_type)
+ return ICL_PORT_DPLL_DEFAULT;
+
+ return encoder->port_pll_type(encoder, crtc_state);
+}
+
static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct intel_shared_dpll *pll)
@@ -3571,7 +3587,7 @@ static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
if (drm_WARN_ON(&i915->drm, !pll))
return;
- if (pll->info->id == DPLL_ID_ICL_TBTPLL)
+ if (icl_ddi_tc_pll_is_tbt(pll))
port_dpll_id = ICL_PORT_DPLL_DEFAULT;
else
port_dpll_id = ICL_PORT_DPLL_MG_PHY;
@@ -3584,7 +3600,7 @@ static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
icl_set_active_port_dpll(crtc_state, port_dpll_id);
- if (crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
+ if (icl_ddi_tc_pll_is_tbt(crtc_state->shared_dpll))
crtc_state->port_clock = icl_calc_tbt_pll_link(i915, encoder->port);
else
crtc_state->port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll,
@@ -3626,7 +3642,8 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder,
enum phy phy = intel_port_to_phy(i915, encoder->port);
if (intel_phy_is_tc(i915, phy))
- intel_tc_port_sanitize_mode(enc_to_dig_port(encoder));
+ intel_tc_port_sanitize_mode(enc_to_dig_port(encoder),
+ crtc_state);
if (crtc_state && intel_crtc_has_dp_encoder(crtc_state))
intel_dp_sync_state(encoder, crtc_state);
@@ -4305,7 +4322,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_bios_encoder_supports_hdmi(devdata);
init_dp = intel_bios_encoder_supports_dp(devdata);
- if (intel_bios_is_lspcon_present(dev_priv, port)) {
+ if (intel_bios_encoder_is_lspcon(devdata)) {
/*
* Lspcon device needs to be driven with DP connector
* with special detection sequence. So make sure DP
@@ -4420,6 +4437,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->enable_clock = jsl_ddi_tc_enable_clock;
encoder->disable_clock = jsl_ddi_tc_disable_clock;
encoder->is_clock_enabled = jsl_ddi_tc_is_clock_enabled;
+ encoder->port_pll_type = icl_ddi_tc_port_pll_type;
encoder->get_config = icl_ddi_combo_get_config;
} else {
encoder->enable_clock = icl_ddi_combo_enable_clock;
@@ -4432,6 +4450,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->enable_clock = icl_ddi_tc_enable_clock;
encoder->disable_clock = icl_ddi_tc_disable_clock;
encoder->is_clock_enabled = icl_ddi_tc_is_clock_enabled;
+ encoder->port_pll_type = icl_ddi_tc_port_pll_type;
encoder->get_config = icl_ddi_tc_get_config;
} else {
encoder->enable_clock = icl_ddi_combo_enable_clock;
@@ -4500,18 +4519,28 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_de_read(dev_priv, DDI_BUF_CTL(port))
& (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
- if (intel_bios_is_lane_reversal_needed(dev_priv, port))
+ if (intel_bios_encoder_lane_reversal(devdata))
dig_port->saved_port_bits |= DDI_BUF_PORT_REVERSAL;
dig_port->dp.output_reg = INVALID_MMIO_REG;
dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
- dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ dig_port->aux_ch = intel_dp_aux_ch(encoder);
if (intel_phy_is_tc(dev_priv, phy)) {
bool is_legacy =
!intel_bios_encoder_supports_typec_usb(devdata) &&
!intel_bios_encoder_supports_tbt(devdata);
+ if (!is_legacy && init_hdmi) {
+ is_legacy = !init_dp;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT says port %c is non-legacy TC and has HDMI (with DP: %s), assume it's %s\n",
+ port_name(port),
+ str_yes_no(init_dp),
+ is_legacy ? "legacy" : "non-legacy");
+ }
+
intel_tc_port_init(dig_port, is_legacy);
encoder->update_prepare = intel_ddi_update_prepare;
@@ -4521,35 +4550,21 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
drm_WARN_ON(&dev_priv->drm, port > PORT_I);
dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(dev_priv, port);
- if (init_dp) {
- if (!intel_ddi_init_dp_connector(dig_port))
- goto err;
-
- dig_port->hpd_pulse = intel_dp_hpd_pulse;
-
- if (dig_port->dp.mso_link_count)
- encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
- }
-
- /* In theory we don't need the encoder->type check, but leave it just in
- * case we have some really bad VBTs... */
- if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
- if (!intel_ddi_init_hdmi_connector(dig_port))
- goto err;
- }
-
if (DISPLAY_VER(dev_priv) >= 11) {
if (intel_phy_is_tc(dev_priv, phy))
dig_port->connected = intel_tc_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
- } else if (DISPLAY_VER(dev_priv) >= 8) {
- if (port == PORT_A || IS_GEMINILAKE(dev_priv) ||
- IS_BROXTON(dev_priv))
+ } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ dig_port->connected = bdw_digital_port_connected;
+ } else if (DISPLAY_VER(dev_priv) == 9) {
+ dig_port->connected = lpt_digital_port_connected;
+ } else if (IS_BROADWELL(dev_priv)) {
+ if (port == PORT_A)
dig_port->connected = bdw_digital_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
- } else {
+ } else if (IS_HASWELL(dev_priv)) {
if (port == PORT_A)
dig_port->connected = hsw_digital_port_connected;
else
@@ -4558,6 +4573,25 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_infoframe_init(dig_port);
+ if (init_dp) {
+ if (!intel_ddi_init_dp_connector(dig_port))
+ goto err;
+
+ dig_port->hpd_pulse = intel_dp_hpd_pulse;
+
+ if (dig_port->dp.mso_link_count)
+ encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
+ }
+
+ /*
+ * In theory we don't need the encoder->type check,
+ * but leave it just in case we have some really bad VBTs...
+ */
+ if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
+ if (!intel_ddi_init_hdmi_connector(dig_port))
+ goto err;
+ }
+
return;
err:
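
The new encoder->port_pll_type hook and its intel_ddi_port_pll_type() wrapper report whether a TC port is on the TBT PLL or the MG/DKL PHY PLL. A sketch of the assumed consumer (the actual caller is not part of these hunks):

	static void example_pick_port_dpll(struct intel_encoder *encoder,
					   struct intel_crtc_state *crtc_state)
	{
		/* ICL_PORT_DPLL_DEFAULT: TBT PLL, or a non-TC encoder without the hook;
		 * ICL_PORT_DPLL_MG_PHY:  dedicated MG/DKL PHY PLL */
		enum icl_port_dpll_id id =
			intel_ddi_port_pll_type(encoder, crtc_state);

		icl_set_active_port_dpll(crtc_state, id);
	}
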
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index d39076facdce..c85e74ae68e4 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -40,6 +40,9 @@ void hsw_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void hsw_ddi_disable_clock(struct intel_encoder *encoder);
bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder);
+enum icl_port_dpll_id
+intel_ddi_port_pll_type(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
void hsw_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder);
@@ -52,9 +55,9 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
+void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index d3994e2a7d63..5a386c7c0bc9 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -55,6 +55,7 @@
#include "i915_reg.h"
#include "i915_utils.h"
#include "i9xx_plane.h"
+#include "i9xx_wm.h"
#include "icl_dsi.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
@@ -94,6 +95,7 @@
#include "intel_hotplug.h"
#include "intel_hti.h"
#include "intel_lvds.h"
+#include "intel_lvds_regs.h"
#include "intel_modeset_setup.h"
#include "intel_modeset_verify.h"
#include "intel_overlay.h"
@@ -109,13 +111,14 @@
#include "intel_quirks.h"
#include "intel_sdvo.h"
#include "intel_snps_phy.h"
-#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_tv.h"
#include "intel_vblank.h"
#include "intel_vdsc.h"
+#include "intel_vdsc_regs.h"
#include "intel_vga.h"
#include "intel_vrr.h"
+#include "intel_wm.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
@@ -127,104 +130,9 @@
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
-static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
+static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
-/**
- * intel_update_watermarks - update FIFO watermark values based on current modes
- * @dev_priv: i915 device
- *
- * Calculate watermark values for the various WM regs based on current mode
- * and plane configuration.
- *
- * There are several cases to deal with here:
- * - normal (i.e. non-self-refresh)
- * - self-refresh (SR) mode
- * - lines are large relative to FIFO size (buffer can hold up to 2)
- * - lines are small relative to FIFO size (buffer can hold more than 2
- * lines), so need to account for TLB latency
- *
- * The normal calculation is:
- * watermark = dotclock * bytes per pixel * latency
- * where latency is platform & configuration dependent (we assume pessimal
- * values here).
- *
- * The SR calculation is:
- * watermark = (trunc(latency/line time)+1) * surface width *
- * bytes per pixel
- * where
- * line time = htotal / dotclock
- * surface width = hdisplay for normal plane and 64 for cursor
- * and latency is assumed to be high, as above.
- *
- * The final value programmed to the register should always be rounded up,
- * and include an extra 2 entries to account for clock crossings.
- *
- * We don't use the sprite, so we can ignore that. And on Crestline we have
- * to set the non-SR watermarks to 8.
- */
-void intel_update_watermarks(struct drm_i915_private *dev_priv)
-{
- if (dev_priv->display.funcs.wm->update_wm)
- dev_priv->display.funcs.wm->update_wm(dev_priv);
-}
-
-static int intel_compute_pipe_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->display.funcs.wm->compute_pipe_wm)
- return dev_priv->display.funcs.wm->compute_pipe_wm(state, crtc);
- return 0;
-}
-
-static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (!dev_priv->display.funcs.wm->compute_intermediate_wm)
- return 0;
- if (drm_WARN_ON(&dev_priv->drm,
- !dev_priv->display.funcs.wm->compute_pipe_wm))
- return 0;
- return dev_priv->display.funcs.wm->compute_intermediate_wm(state, crtc);
-}
-
-static bool intel_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->display.funcs.wm->initial_watermarks) {
- dev_priv->display.funcs.wm->initial_watermarks(state, crtc);
- return true;
- }
- return false;
-}
-
-static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->display.funcs.wm->atomic_update_watermarks)
- dev_priv->display.funcs.wm->atomic_update_watermarks(state, crtc);
-}
-
-static void intel_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->display.funcs.wm->optimize_watermarks)
- dev_priv->display.funcs.wm->optimize_watermarks(state, crtc);
-}
-
-static int intel_compute_global_watermarks(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->display.funcs.wm->compute_global_watermarks)
- return dev_priv->display.funcs.wm->compute_global_watermarks(state);
- return 0;
-}
-
/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
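
The kernel-doc removed above still documents the legacy formula, watermark = dotclock * bytes-per-pixel * latency (rounded up, plus two extra entries for clock crossings). A purely illustrative worked example with assumed numbers; none of the values below come from this patch:

	static unsigned int example_wm_entries(void)
	{
		unsigned int dotclock_khz = 148500;	/* 148.5 MHz pixel clock */
		unsigned int cpp = 4;			/* 32bpp framebuffer */
		unsigned int latency_ns = 2000;		/* pessimal 2 us latency */
		unsigned int entry_bytes = 64;		/* hypothetical FIFO entry size */
		unsigned int bytes;

		/* bytes fetched during the latency window: 148500 * 4 * 2000 / 1e6 = 1188 */
		bytes = DIV_ROUND_UP(dotclock_khz * cpp * latency_ns, 1000000);

		/* 1188 / 64 rounded up = 19, plus 2 for clock crossings = 21 */
		return DIV_ROUND_UP(bytes, entry_bytes) + 2;
	}
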
@@ -293,11 +201,11 @@ static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
if (enable)
- intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
- intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ 0, DUPS1_GATING_DIS | DUPS2_GATING_DIS);
else
- intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
- intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ DUPS1_GATING_DIS | DUPS2_GATING_DIS, 0);
}
/* Wa_2006604312:icl,ehl */
@@ -306,11 +214,9 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
bool enable)
{
if (enable)
- intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
- intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 0, DPFR_GATING_DIS);
else
- intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
- intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), DPFR_GATING_DIS, 0);
}
/* Wa_1604331009:icl,jsl,ehl */
@@ -395,8 +301,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
/* Wait for the Pipe State to go off */
- if (intel_de_wait_for_clear(dev_priv, PIPECONF(cpu_transcoder),
- PIPECONF_STATE_ENABLE, 100))
+ if (intel_de_wait_for_clear(dev_priv, TRANSCONF(cpu_transcoder),
+ TRANSCONF_STATE_ENABLE, 100))
drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
} else {
intel_wait_for_pipe_scanline_stopped(crtc);
@@ -417,8 +323,8 @@ void assert_transcoder(struct drm_i915_private *dev_priv,
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (wakeref) {
- u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
- cur_state = !!(val & PIPECONF_ENABLE);
+ u32 val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
+ cur_state = !!(val & TRANSCONF_ENABLE);
intel_display_power_put(dev_priv, power_domain, wakeref);
} else {
@@ -530,15 +436,15 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
0, PIPE_ARB_USE_PROG_SLOTS);
- reg = PIPECONF(cpu_transcoder);
+ reg = TRANSCONF(cpu_transcoder);
val = intel_de_read(dev_priv, reg);
- if (val & PIPECONF_ENABLE) {
+ if (val & TRANSCONF_ENABLE) {
/* we keep both pipes enabled on 830 */
drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
return;
}
- intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
+ intel_de_write(dev_priv, reg, val | TRANSCONF_ENABLE);
intel_de_posting_read(dev_priv, reg);
/*
@@ -569,9 +475,9 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
*/
assert_planes_disabled(crtc);
- reg = PIPECONF(cpu_transcoder);
+ reg = TRANSCONF(cpu_transcoder);
val = intel_de_read(dev_priv, reg);
- if ((val & PIPECONF_ENABLE) == 0)
+ if ((val & TRANSCONF_ENABLE) == 0)
return;
/*
@@ -579,11 +485,11 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
* so best keep it disabled when not needed.
*/
if (old_crtc_state->double_wide)
- val &= ~PIPECONF_DOUBLE_WIDE;
+ val &= ~TRANSCONF_DOUBLE_WIDE;
/* Don't disable pipe or pipe PLLs if needed */
if (!IS_I830(dev_priv))
- val &= ~PIPECONF_ENABLE;
+ val &= ~TRANSCONF_ENABLE;
if (DISPLAY_VER(dev_priv) >= 14)
intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder),
@@ -593,7 +499,7 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
intel_de_write(dev_priv, reg, val);
- if ((val & PIPECONF_ENABLE) == 0)
+ if ((val & TRANSCONF_ENABLE) == 0)
intel_wait_for_pipe_off(old_crtc_state);
}
@@ -1209,6 +1115,9 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (needs_cursorclk_wa(old_crtc_state) &&
!needs_cursorclk_wa(new_crtc_state))
icl_wa_cursorclkgating(dev_priv, pipe, false);
+
+ if (intel_crtc_needs_color_update(new_crtc_state))
+ intel_color_post_update(new_crtc_state);
}
static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
@@ -1252,7 +1161,8 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- u8 update_planes = new_crtc_state->update_planes;
+ u8 disable_async_flip_planes = old_crtc_state->async_flip_planes &
+ ~new_crtc_state->async_flip_planes;
const struct intel_plane_state *old_plane_state;
struct intel_plane *plane;
bool need_vbl_wait = false;
@@ -1261,7 +1171,7 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
if (plane->need_async_flip_disable_wa &&
plane->pipe == crtc->pipe &&
- update_planes & BIT(plane->id)) {
+ disable_async_flip_planes & BIT(plane->id)) {
/*
* Apart from the async flip bit we want to
* preserve the old state for the plane.
@@ -1378,7 +1288,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* WA for platforms where async address update enable bit
* is double buffered and only latched at start of vblank.
*/
- if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
+ if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes)
intel_crtc_async_flip_disable_wa(state, crtc);
}
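
The two hunks above replace the "uapi async_flip was turned off" check with a per-plane mask, so the workaround is only applied to planes that actually stop async flipping. A worked example of the mask logic with illustrative plane IDs:

	/* primary and sprite 0 were async flipping; only the primary keeps doing so,
	 * so only sprite 0 needs the async-flip-disable workaround */
	u8 old_async_flip_planes = BIT(PLANE_PRIMARY) | BIT(PLANE_SPRITE0);
	u8 new_async_flip_planes = BIT(PLANE_PRIMARY);
	u8 disable_async_flip_planes =
		old_async_flip_planes & ~new_async_flip_planes; /* == BIT(PLANE_SPRITE0) */
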
@@ -1801,12 +1711,10 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
enum transcoder transcoder = crtc_state->cpu_transcoder;
i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) :
CHICKEN_TRANS(transcoder);
- u32 val;
- val = intel_de_read(dev_priv, reg);
- val &= ~HSW_FRAME_START_DELAY_MASK;
- val |= HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
- intel_de_write(dev_priv, reg, val);
+ intel_de_rmw(dev_priv, reg,
+ HSW_FRAME_START_DELAY_MASK,
+ HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
}
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
@@ -1846,7 +1754,7 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta
intel_set_transcoder_timings(crtc_state);
if (cpu_transcoder != TRANSCODER_EDP)
- intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
+ intel_de_write(dev_priv, TRANS_MULT(cpu_transcoder),
crtc_state->pixel_multiplier - 1);
hsw_set_frame_start_delay(crtc_state);
@@ -1887,7 +1795,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
intel_set_pipe_src_size(new_crtc_state);
if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- bdw_set_pipemisc(new_crtc_state);
+ bdw_set_pipe_misc(new_crtc_state);
if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
!transcoder_is_dsi(cpu_transcoder))
@@ -2233,6 +2141,8 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
intel_set_pipe_src_size(new_crtc_state);
+ intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0);
+
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
@@ -2819,12 +2729,14 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- u32 crtc_vtotal, crtc_vblank_end;
+ u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
int vsyncshift = 0;
/* We need to be careful not to change the adjusted mode, for otherwise
* the hw state checker will get angry at the mismatch. */
+ crtc_vdisplay = adjusted_mode->crtc_vdisplay;
crtc_vtotal = adjusted_mode->crtc_vtotal;
+ crtc_vblank_start = adjusted_mode->crtc_vblank_start;
crtc_vblank_end = adjusted_mode->crtc_vblank_end;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
@@ -2841,23 +2753,44 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
vsyncshift += adjusted_mode->crtc_htotal;
}
+ /*
+ * VBLANK_START no longer works on ADL+; instead we must use
+ * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start.
+ */
+ if (DISPLAY_VER(dev_priv) >= 13) {
+ intel_de_write(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder),
+ crtc_vblank_start - crtc_vdisplay);
+
+ /*
+ * VBLANK_START not used by hw, just clear it
+ * to make it stand out in register dumps.
+ */
+ crtc_vblank_start = 1;
+ }
+
if (DISPLAY_VER(dev_priv) > 3)
- intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
- vsyncshift);
-
- intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
- (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
- intel_de_write(dev_priv, HBLANK(cpu_transcoder),
- (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
- intel_de_write(dev_priv, HSYNC(cpu_transcoder),
- (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
-
- intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
- (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
- intel_de_write(dev_priv, VBLANK(cpu_transcoder),
- (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
- intel_de_write(dev_priv, VSYNC(cpu_transcoder),
- (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ intel_de_write(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder),
+ vsyncshift);
+
+ intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder),
+ HACTIVE(adjusted_mode->crtc_hdisplay - 1) |
+ HTOTAL(adjusted_mode->crtc_htotal - 1));
+ intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder),
+ HBLANK_START(adjusted_mode->crtc_hblank_start - 1) |
+ HBLANK_END(adjusted_mode->crtc_hblank_end - 1));
+ intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder),
+ HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
+ HSYNC_END(adjusted_mode->crtc_hsync_end - 1));
+
+ intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
+ VACTIVE(crtc_vdisplay - 1) |
+ VTOTAL(crtc_vtotal - 1));
+ intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
+ VBLANK_START(crtc_vblank_start - 1) |
+ VBLANK_END(crtc_vblank_end - 1));
+ intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder),
+ VSYNC_START(adjusted_mode->crtc_vsync_start - 1) |
+ VSYNC_END(adjusted_mode->crtc_vsync_end - 1));
/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
* programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
@@ -2865,9 +2798,9 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
* bits. */
if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
(pipe == PIPE_B || pipe == PIPE_C))
- intel_de_write(dev_priv, VTOTAL(pipe),
- intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
-
+ intel_de_write(dev_priv, TRANS_VTOTAL(pipe),
+ VACTIVE(crtc_vdisplay - 1) |
+ VTOTAL(crtc_vtotal - 1));
}
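
Per the comment above, on DISPLAY_VER >= 13 the vblank start is programmed as a delta from vactive via TRANS_SET_CONTEXT_LATENCY; the readout side in a later hunk of this file adds the delta back. A condensed sketch of the pair:

	static void example_program_vblank_start(struct drm_i915_private *i915,
						 enum transcoder cpu_transcoder,
						 const struct drm_display_mode *m)
	{
		intel_de_write(i915, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder),
			       m->crtc_vblank_start - m->crtc_vdisplay);
	}

	static int example_read_vblank_start(struct drm_i915_private *i915,
					     enum transcoder cpu_transcoder,
					     const struct drm_display_mode *m)
	{
		return m->crtc_vdisplay +
		       intel_de_read(i915, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder));
	}
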
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
@@ -2895,9 +2828,9 @@ static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
if (DISPLAY_VER(dev_priv) >= 9 ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
+ return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW;
else
- return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
+ return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK;
}
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
@@ -2906,43 +2839,47 @@ static void intel_get_transcoder_timings(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
u32 tmp;
- tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
+ tmp = intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder));
+ adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1;
+ adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1;
if (!transcoder_is_dsi(cpu_transcoder)) {
- tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_hblank_start =
- (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_hblank_end =
- ((tmp >> 16) & 0xffff) + 1;
+ tmp = intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder));
+ adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1;
+ adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1;
}
- tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
- tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
+ tmp = intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder));
+ adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1;
+ adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1;
+
+ tmp = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder));
+ adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1;
+ adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1;
+ /* FIXME TGL+ DSI transcoders have this! */
if (!transcoder_is_dsi(cpu_transcoder)) {
- tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_vblank_start =
- (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_vblank_end =
- ((tmp >> 16) & 0xffff) + 1;
+ tmp = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder));
+ adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1;
+ adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1;
}
- tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
+ tmp = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder));
+ adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1;
+ adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1;
if (intel_pipe_is_interlaced(pipe_config)) {
- pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
- pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
- pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
+ adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ adjusted_mode->crtc_vtotal += 1;
+ adjusted_mode->crtc_vblank_end += 1;
}
+
+ if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder))
+ adjusted_mode->crtc_vblank_start =
+ adjusted_mode->crtc_vdisplay +
+ intel_de_read(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder));
}
static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
@@ -2982,7 +2919,8 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 pipeconf = 0;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 val = 0;
/*
* - We keep both pipes enabled on 830
@@ -2990,18 +2928,18 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
* - During fastset the pipe is already enabled and must remain so
*/
if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state))
- pipeconf |= PIPECONF_ENABLE;
+ val |= TRANSCONF_ENABLE;
if (crtc_state->double_wide)
- pipeconf |= PIPECONF_DOUBLE_WIDE;
+ val |= TRANSCONF_DOUBLE_WIDE;
/* only g4x and later have fancy bpc/dither controls */
if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
IS_CHERRYVIEW(dev_priv)) {
/* Bspec claims that we can't use dithering for 30bpp pipes. */
if (crtc_state->dither && crtc_state->pipe_bpp != 30)
- pipeconf |= PIPECONF_DITHER_EN |
- PIPECONF_DITHER_TYPE_SP;
+ val |= TRANSCONF_DITHER_EN |
+ TRANSCONF_DITHER_TYPE_SP;
switch (crtc_state->pipe_bpp) {
default:
@@ -3009,13 +2947,13 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
MISSING_CASE(crtc_state->pipe_bpp);
fallthrough;
case 18:
- pipeconf |= PIPECONF_BPC_6;
+ val |= TRANSCONF_BPC_6;
break;
case 24:
- pipeconf |= PIPECONF_BPC_8;
+ val |= TRANSCONF_BPC_8;
break;
case 30:
- pipeconf |= PIPECONF_BPC_10;
+ val |= TRANSCONF_BPC_10;
break;
}
}
@@ -3023,23 +2961,23 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (DISPLAY_VER(dev_priv) < 4 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
- pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+ val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION;
else
- pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
+ val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT;
} else {
- pipeconf |= PIPECONF_INTERLACE_PROGRESSIVE;
+ val |= TRANSCONF_INTERLACE_PROGRESSIVE;
}
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
crtc_state->limited_color_range)
- pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
+ val |= TRANSCONF_COLOR_RANGE_SELECT;
- pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+ val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);
- pipeconf |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
+ val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
- intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
- intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
+ intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
@@ -3140,20 +3078,20 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
}
static enum intel_output_format
-bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
+bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
- tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
- if (tmp & PIPEMISC_YUV420_ENABLE) {
+ if (tmp & PIPE_MISC_YUV420_ENABLE) {
/* We support 4:2:0 in full blend mode only */
drm_WARN_ON(&dev_priv->drm,
- (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
+ (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0);
return INTEL_OUTPUT_FORMAT_YCBCR420;
- } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
+ } else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) {
return INTEL_OUTPUT_FORMAT_YCBCR444;
} else {
return INTEL_OUTPUT_FORMAT_RGB;
@@ -3198,20 +3136,20 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
ret = false;
- tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
- if (!(tmp & PIPECONF_ENABLE))
+ tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
+ if (!(tmp & TRANSCONF_ENABLE))
goto out;
if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
IS_CHERRYVIEW(dev_priv)) {
- switch (tmp & PIPECONF_BPC_MASK) {
- case PIPECONF_BPC_6:
+ switch (tmp & TRANSCONF_BPC_MASK) {
+ case TRANSCONF_BPC_6:
pipe_config->pipe_bpp = 18;
break;
- case PIPECONF_BPC_8:
+ case TRANSCONF_BPC_8:
pipe_config->pipe_bpp = 24;
break;
- case PIPECONF_BPC_10:
+ case TRANSCONF_BPC_10:
pipe_config->pipe_bpp = 30;
break;
default:
@@ -3221,12 +3159,12 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
}
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- (tmp & PIPECONF_COLOR_RANGE_SELECT))
+ (tmp & TRANSCONF_COLOR_RANGE_SELECT))
pipe_config->limited_color_range = true;
- pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_I9XX, tmp);
+ pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp);
- pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1;
+ pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;
if (IS_CHERRYVIEW(dev_priv))
pipe_config->cgm_mode = intel_de_read(dev_priv,
@@ -3236,7 +3174,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
intel_color_get_config(pipe_config);
if (DISPLAY_VER(dev_priv) < 4)
- pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
+ pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE;
intel_get_transcoder_timings(crtc, pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
@@ -3306,7 +3244,7 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
/*
@@ -3314,7 +3252,7 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
* - During fastset the pipe is already enabled and must remain so
*/
if (!intel_crtc_needs_modeset(crtc_state))
- val |= PIPECONF_ENABLE;
+ val |= TRANSCONF_ENABLE;
switch (crtc_state->pipe_bpp) {
default:
@@ -3322,26 +3260,26 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
MISSING_CASE(crtc_state->pipe_bpp);
fallthrough;
case 18:
- val |= PIPECONF_BPC_6;
+ val |= TRANSCONF_BPC_6;
break;
case 24:
- val |= PIPECONF_BPC_8;
+ val |= TRANSCONF_BPC_8;
break;
case 30:
- val |= PIPECONF_BPC_10;
+ val |= TRANSCONF_BPC_10;
break;
case 36:
- val |= PIPECONF_BPC_12;
+ val |= TRANSCONF_BPC_12;
break;
}
if (crtc_state->dither)
- val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;
+ val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
- val |= PIPECONF_INTERLACE_IF_ID_ILK;
+ val |= TRANSCONF_INTERLACE_IF_ID_ILK;
else
- val |= PIPECONF_INTERLACE_PF_PD_ILK;
+ val |= TRANSCONF_INTERLACE_PF_PD_ILK;
/*
* This would end up with an odd purple hue over
@@ -3352,18 +3290,18 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
if (crtc_state->limited_color_range &&
!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
- val |= PIPECONF_COLOR_RANGE_SELECT;
+ val |= TRANSCONF_COLOR_RANGE_SELECT;
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
- val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
+ val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709;
- val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+ val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);
- val |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
- val |= PIPECONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);
+ val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
+ val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);
- intel_de_write(dev_priv, PIPECONF(pipe), val);
- intel_de_posting_read(dev_priv, PIPECONF(pipe));
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
+ intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
@@ -3378,25 +3316,25 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
* - During fastset the pipe is already enabled and must remain so
*/
if (!intel_crtc_needs_modeset(crtc_state))
- val |= PIPECONF_ENABLE;
+ val |= TRANSCONF_ENABLE;
if (IS_HASWELL(dev_priv) && crtc_state->dither)
- val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;
+ val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
- val |= PIPECONF_INTERLACE_IF_ID_ILK;
+ val |= TRANSCONF_INTERLACE_IF_ID_ILK;
else
- val |= PIPECONF_INTERLACE_PF_PD_ILK;
+ val |= TRANSCONF_INTERLACE_PF_PD_ILK;
if (IS_HASWELL(dev_priv) &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
- val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
+ val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW;
- intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
- intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
+ intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
-static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
+static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -3404,18 +3342,18 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
switch (crtc_state->pipe_bpp) {
case 18:
- val |= PIPEMISC_BPC_6;
+ val |= PIPE_MISC_BPC_6;
break;
case 24:
- val |= PIPEMISC_BPC_8;
+ val |= PIPE_MISC_BPC_8;
break;
case 30:
- val |= PIPEMISC_BPC_10;
+ val |= PIPE_MISC_BPC_10;
break;
case 36:
/* Port output 12BPC defined for ADLP+ */
if (DISPLAY_VER(dev_priv) > 12)
- val |= PIPEMISC_BPC_12_ADLP;
+ val |= PIPE_MISC_BPC_12_ADLP;
break;
default:
MISSING_CASE(crtc_state->pipe_bpp);
@@ -3423,38 +3361,38 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
}
if (crtc_state->dither)
- val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
+ val |= PIPE_MISC_DITHER_ENABLE | PIPE_MISC_DITHER_TYPE_SP;
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
- val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
+ val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV;
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
- val |= PIPEMISC_YUV420_ENABLE |
- PIPEMISC_YUV420_MODE_FULL_BLEND;
+ val |= PIPE_MISC_YUV420_ENABLE |
+ PIPE_MISC_YUV420_MODE_FULL_BLEND;
if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
- val |= PIPEMISC_HDR_MODE_PRECISION;
+ val |= PIPE_MISC_HDR_MODE_PRECISION;
if (DISPLAY_VER(dev_priv) >= 12)
- val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
+ val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC;
- intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
+ intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val);
}
-int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
+int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
- tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
- switch (tmp & PIPEMISC_BPC_MASK) {
- case PIPEMISC_BPC_6:
+ switch (tmp & PIPE_MISC_BPC_MASK) {
+ case PIPE_MISC_BPC_6:
return 18;
- case PIPEMISC_BPC_8:
+ case PIPE_MISC_BPC_8:
return 24;
- case PIPEMISC_BPC_10:
+ case PIPE_MISC_BPC_10:
return 30;
/*
* PORT OUTPUT 12 BPC defined for ADLP+.
@@ -3466,7 +3404,7 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
* on older platforms, need to find a workaround for 12 BPC
* MIPI DSI HW readout.
*/
- case PIPEMISC_BPC_12_ADLP:
+ case PIPE_MISC_BPC_12_ADLP:
if (DISPLAY_VER(dev_priv) > 12)
return 36;
fallthrough;
@@ -3618,33 +3556,33 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
pipe_config->shared_dpll = NULL;
ret = false;
- tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
- if (!(tmp & PIPECONF_ENABLE))
+ tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
+ if (!(tmp & TRANSCONF_ENABLE))
goto out;
- switch (tmp & PIPECONF_BPC_MASK) {
- case PIPECONF_BPC_6:
+ switch (tmp & TRANSCONF_BPC_MASK) {
+ case TRANSCONF_BPC_6:
pipe_config->pipe_bpp = 18;
break;
- case PIPECONF_BPC_8:
+ case TRANSCONF_BPC_8:
pipe_config->pipe_bpp = 24;
break;
- case PIPECONF_BPC_10:
+ case TRANSCONF_BPC_10:
pipe_config->pipe_bpp = 30;
break;
- case PIPECONF_BPC_12:
+ case TRANSCONF_BPC_12:
pipe_config->pipe_bpp = 36;
break;
default:
break;
}
- if (tmp & PIPECONF_COLOR_RANGE_SELECT)
+ if (tmp & TRANSCONF_COLOR_RANGE_SELECT)
pipe_config->limited_color_range = true;
- switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
- case PIPECONF_OUTPUT_COLORSPACE_YUV601:
- case PIPECONF_OUTPUT_COLORSPACE_YUV709:
+ switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) {
+ case TRANSCONF_OUTPUT_COLORSPACE_YUV601:
+ case TRANSCONF_OUTPUT_COLORSPACE_YUV709:
pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
break;
default:
@@ -3652,11 +3590,11 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
break;
}
- pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_ILK, tmp);
+ pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp);
- pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1;
+ pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;
- pipe_config->msa_timing_delay = REG_FIELD_GET(PIPECONF_MSA_TIMING_DELAY_MASK, tmp);
+ pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp);
pipe_config->csc_mode = intel_de_read(dev_priv,
PIPE_CSC_MODE(crtc->pipe));
@@ -3933,9 +3871,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
pipe_config->pch_pfit.force_thru = true;
}
- tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
+ tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
- return tmp & PIPECONF_ENABLE;
+ return tmp & TRANSCONF_ENABLE;
}
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
@@ -4039,15 +3977,15 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (IS_HASWELL(dev_priv)) {
u32 tmp = intel_de_read(dev_priv,
- PIPECONF(pipe_config->cpu_transcoder));
+ TRANSCONF(pipe_config->cpu_transcoder));
- if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
+ if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW)
pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
else
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
} else {
pipe_config->output_format =
- bdw_get_pipemisc_output_format(crtc);
+ bdw_get_pipe_misc_output_format(crtc);
}
pipe_config->gamma_mode = intel_de_read(dev_priv,
@@ -4090,7 +4028,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
pipe_config->pixel_multiplier =
intel_de_read(dev_priv,
- PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
+ TRANS_MULT(pipe_config->cpu_transcoder)) + 1;
} else {
pipe_config->pixel_multiplier = 1;
}
@@ -5145,6 +5083,7 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
* only fields that are known not to cause problems are preserved. */
saved_state->uapi = crtc_state->uapi;
+ saved_state->inherited = crtc_state->inherited;
saved_state->scaler_state = crtc_state->scaler_state;
saved_state->shared_dpll = crtc_state->shared_dpll;
saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
@@ -5439,6 +5378,20 @@ pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
}
}
+/* Returns the length up to and including the last differing byte */
+static size_t
+memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
+{
+ int i;
+
+ for (i = len - 1; i >= 0; i--) {
+ if (a[i] != b[i])
+ return i + 1;
+ }
+
+ return 0;
+}
+
static void
pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv,
bool fastset, const char *name,
@@ -5448,6 +5401,9 @@ pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv,
if (!drm_debug_enabled(DRM_UT_KMS))
return;
+ /* only dump up to the last difference */
+ len = memcmp_diff_len(a, b, len);
+
drm_dbg_kms(&dev_priv->drm,
"fastset mismatch in %s buffer\n", name);
print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE,
@@ -5455,6 +5411,9 @@ pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv,
print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE,
16, 0, b, len, false);
} else {
+ /* only dump up to the last difference */
+ len = memcmp_diff_len(a, b, len);
+
drm_err(&dev_priv->drm, "mismatch in %s buffer\n", name);
print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE,
16, 0, a, len, false);
@@ -5943,73 +5902,13 @@ int intel_modeset_all_pipes(struct intel_atomic_state *state,
return ret;
crtc_state->update_planes |= crtc_state->active_planes;
+ crtc_state->async_flip_planes = 0;
+ crtc_state->do_async_flip = false;
}
return 0;
}
-void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct drm_display_mode adjusted_mode;
-
- drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode);
-
- if (crtc_state->vrr.enable) {
- adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
- adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
- adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
- crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
- }
-
- drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
-
- crtc->mode_flags = crtc_state->mode_flags;
-
- /*
- * The scanline counter increments at the leading edge of hsync.
- *
- * On most platforms it starts counting from vtotal-1 on the
- * first active line. That means the scanline counter value is
- * always one less than what we would expect. Ie. just after
- * start of vblank, which also occurs at start of hsync (on the
- * last active line), the scanline counter will read vblank_start-1.
- *
- * On gen2 the scanline counter starts counting from 1 instead
- * of vtotal-1, so we have to subtract one (or rather add vtotal-1
- * to keep the value positive), instead of adding one.
- *
- * On HSW+ the behaviour of the scanline counter depends on the output
- * type. For DP ports it behaves like most other platforms, but on HDMI
- * there's an extra 1 line difference. So we need to add two instead of
- * one to the value.
- *
- * On VLV/CHV DSI the scanline counter would appear to increment
- * approx. 1/3 of a scanline before start of vblank. Unfortunately
- * that means we can't tell whether we're in vblank or not while
- * we're on that particular line. We must still set scanline_offset
- * to 1 so that the vblank timestamps come out correct when we query
- * the scanline counter from within the vblank interrupt handler.
- * However if queried just before the start of vblank we'll get an
- * answer that's slightly in the future.
- */
- if (DISPLAY_VER(dev_priv) == 2) {
- int vtotal;
-
- vtotal = adjusted_mode.crtc_vtotal;
- if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
- vtotal /= 2;
-
- crtc->scanline_offset = vtotal - 1;
- } else if (HAS_DDI(dev_priv) &&
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- crtc->scanline_offset = 2;
- } else {
- crtc->scanline_offset = 1;
- }
-}
-
/*
* This implements the workaround described in the "notes" section of the mode
* set sequence documentation. When going from no pipes or single pipe to
@@ -6695,8 +6594,8 @@ static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
* @dev: drm device
* @_state: state to validate
*/
-static int intel_atomic_check(struct drm_device *dev,
- struct drm_atomic_state *_state)
+int intel_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *_state)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
@@ -7014,7 +6913,7 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state,
intel_color_commit_arm(new_crtc_state);
if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- bdw_set_pipemisc(new_crtc_state);
+ bdw_set_pipe_misc(new_crtc_state);
if (intel_crtc_needs_fastset(new_crtc_state))
intel_pipe_fastset(old_crtc_state, new_crtc_state);
@@ -7090,6 +6989,8 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_fbc_update(state, crtc);
+ drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));
+
if (!modeset &&
intel_crtc_needs_color_update(new_crtc_state))
intel_color_commit_noarm(new_crtc_state);
@@ -7457,8 +7358,28 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
drm_atomic_helper_wait_for_dependencies(&state->base);
drm_dp_mst_atomic_wait_for_dependencies(&state->base);
- if (state->modeset)
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
+ /*
+ * During full modesets we write a lot of registers, wait
+ * for PLLs, etc. Doing that while DC states are enabled
+ * is not a good idea.
+ *
+ * During fastsets and other updates we also need to
+ * disable DC states due to the following scenario:
+ * 1. DC5 exit and PSR exit happen
+ * 2. Some or all _noarm() registers are written
+ * 3. Due to some long delay PSR is re-entered
+ * 4. DC5 entry -> DMC saves the already written new
+ * _noarm() registers and the old not yet written
+ * _arm() registers
+ * 5. DC5 exit -> DMC restores a mixture of old and
+ * new register values and arms the update
+ * 6. PSR exit -> hardware latches a mixture of old and
+ * new register values -> corrupted frame, or worse
+ * 7. New _arm() registers are finally written
+ * 8. Hardware finally latches a complete set of new
+ * register values, and subsequent frames will be OK again
+ */
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
intel_atomic_prepare_plane_clear_colors(state);
@@ -7607,8 +7528,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* the culprit.
*/
intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
- intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
}
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
/*
@@ -8356,124 +8277,6 @@ void intel_modeset_init_hw(struct drm_i915_private *i915)
cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw;
}
-static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
-{
- struct drm_plane *plane;
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(state->dev, crtc) {
- struct intel_crtc_state *crtc_state;
-
- crtc_state = intel_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
-
- if (crtc_state->hw.active) {
- /*
- * Preserve the inherited flag to avoid
- * taking the full modeset path.
- */
- crtc_state->inherited = true;
- }
- }
-
- drm_for_each_plane(plane, state->dev) {
- struct drm_plane_state *plane_state;
-
- plane_state = drm_atomic_get_plane_state(state, plane);
- if (IS_ERR(plane_state))
- return PTR_ERR(plane_state);
- }
-
- return 0;
-}
-
-/*
- * Calculate what we think the watermarks should be for the state we've read
- * out of the hardware and then immediately program those watermarks so that
- * we ensure the hardware settings match our internal state.
- *
- * We can calculate what we think WM's should be by creating a duplicate of the
- * current state (which was constructed during hardware readout) and running it
- * through the atomic check code to calculate new watermark values in the
- * state object.
- */
-static void sanitize_watermarks(struct drm_i915_private *dev_priv)
-{
- struct drm_atomic_state *state;
- struct intel_atomic_state *intel_state;
- struct intel_crtc *crtc;
- struct intel_crtc_state *crtc_state;
- struct drm_modeset_acquire_ctx ctx;
- int ret;
- int i;
-
- /* Only supported on platforms that use atomic watermark design */
- if (!dev_priv->display.funcs.wm->optimize_watermarks)
- return;
-
- state = drm_atomic_state_alloc(&dev_priv->drm);
- if (drm_WARN_ON(&dev_priv->drm, !state))
- return;
-
- intel_state = to_intel_atomic_state(state);
-
- drm_modeset_acquire_init(&ctx, 0);
-
-retry:
- state->acquire_ctx = &ctx;
-
- /*
- * Hardware readout is the only time we don't want to calculate
- * intermediate watermarks (since we don't trust the current
- * watermarks).
- */
- if (!HAS_GMCH(dev_priv))
- intel_state->skip_intermediate_wm = true;
-
- ret = sanitize_watermarks_add_affected(state);
- if (ret)
- goto fail;
-
- ret = intel_atomic_check(&dev_priv->drm, state);
- if (ret)
- goto fail;
-
- /* Write calculated watermark values back */
- for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
- crtc_state->wm.need_postvbl_update = true;
- intel_optimize_watermarks(intel_state, crtc);
-
- to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
- }
-
-fail:
- if (ret == -EDEADLK) {
- drm_atomic_state_clear(state);
- drm_modeset_backoff(&ctx);
- goto retry;
- }
-
- /*
- * If we fail here, it means that the hardware appears to be
- * programmed in a way that shouldn't be possible, given our
- * understanding of watermark requirements. This might mean a
- * mistake in the hardware readout code or a mistake in the
- * watermark calculations for a given platform. Raise a WARN
- * so that this is noticeable.
- *
- * If this actually happens, we'll have to just leave the
- * BIOS-programmed watermarks untouched and hope for the best.
- */
- drm_WARN(&dev_priv->drm, ret,
- "Could not determine valid watermarks for inherited state\n");
-
- drm_atomic_state_put(state);
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-}
-
static int intel_initial_commit(struct drm_device *dev)
{
struct drm_atomic_state *state = NULL;
@@ -8634,12 +8437,16 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
goto cleanup_bios;
/* FIXME: completely on the wrong abstraction layer */
+ ret = intel_power_domains_init(i915);
+ if (ret < 0)
+ goto cleanup_vga;
+
intel_power_domains_init_hw(i915, false);
if (!HAS_DISPLAY(i915))
return 0;
- intel_dmc_ucode_init(i915);
+ intel_dmc_init(i915);
i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
@@ -8674,8 +8481,9 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
return 0;
cleanup_vga_client_pw_domain_dmc:
- intel_dmc_ucode_fini(i915);
+ intel_dmc_fini(i915);
intel_power_domains_driver_remove(i915);
+cleanup_vga:
intel_vga_unregister(i915);
cleanup_bios:
intel_bios_driver_remove(i915);
@@ -8694,7 +8502,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return 0;
- intel_init_pm(i915);
+ intel_wm_init(i915);
intel_panel_sanitize_ssc(i915);
@@ -8750,7 +8558,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
* since the watermark calculation done here will use pstate->fb.
*/
if (!HAS_GMCH(i915))
- sanitize_watermarks(i915);
+ ilk_wm_sanitize(i915);
return 0;
}
@@ -8791,6 +8599,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ enum transcoder cpu_transcoder = (enum transcoder)pipe;
/* 640x480@60Hz, ~25175 kHz */
struct dpll clock = {
.m1 = 18,
@@ -8817,13 +8626,20 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
PLL_REF_INPUT_DREFCLK |
DPLL_VCO_ENABLE;
- intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
- intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
- intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
- intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
- intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
- intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
- intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
+ intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder),
+ HACTIVE(640 - 1) | HTOTAL(800 - 1));
+ intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder),
+ HBLANK_START(640 - 1) | HBLANK_END(800 - 1));
+ intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder),
+ HSYNC_START(656 - 1) | HSYNC_END(752 - 1));
+ intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
+ VACTIVE(480 - 1) | VTOTAL(525 - 1));
+ intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
+ VBLANK_START(480 - 1) | VBLANK_END(525 - 1));
+ intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder),
+ VSYNC_START(490 - 1) | VSYNC_END(492 - 1));
+ intel_de_write(dev_priv, PIPESRC(pipe),
+ PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1));
intel_de_write(dev_priv, FP0(pipe), fp);
intel_de_write(dev_priv, FP1(pipe), fp);
@@ -8854,8 +8670,8 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
udelay(150); /* wait for warmup */
}
- intel_de_write(dev_priv, PIPECONF(pipe), PIPECONF_ENABLE);
- intel_de_posting_read(dev_priv, PIPECONF(pipe));
+ intel_de_write(dev_priv, TRANSCONF(pipe), TRANSCONF_ENABLE);
+ intel_de_posting_read(dev_priv, TRANSCONF(pipe));
intel_wait_for_pipe_scanline_moving(crtc);
}
@@ -8878,8 +8694,8 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
drm_WARN_ON(&dev_priv->drm,
intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK);
- intel_de_write(dev_priv, PIPECONF(pipe), 0);
- intel_de_posting_read(dev_priv, PIPECONF(pipe));
+ intel_de_write(dev_priv, TRANSCONF(pipe), 0);
+ intel_de_posting_read(dev_priv, TRANSCONF(pipe));
intel_wait_for_pipe_scanline_stopped(crtc);
@@ -9000,7 +8816,7 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
- intel_dmc_ucode_fini(i915);
+ intel_dmc_fini(i915);
intel_power_domains_driver_remove(i915);
@@ -9035,14 +8851,14 @@ void intel_display_driver_register(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return;
- intel_display_debugfs_register(i915);
-
/* Must be done after probing outputs */
intel_opregion_register(i915);
intel_acpi_video_register(i915);
intel_audio_init(i915);
+ intel_display_debugfs_register(i915);
+
/*
* Some ports require correctly set-up hpd registers for
* detection to work properly (leading to ghost connected
@@ -9051,7 +8867,7 @@ void intel_display_driver_register(struct drm_i915_private *i915)
* enabled. We do it last so that the async config cannot run
* before the connectors are registered.
*/
- intel_fbdev_initial_config_async(&i915->drm);
+ intel_fbdev_initial_config_async(i915);
/*
* We need to coordinate the hotplugs with the asynchronous
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index cb6f520cc575..596fd3ec1983 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -32,6 +32,7 @@
enum drm_scaling_filter;
struct dpll;
+struct drm_atomic_state;
struct drm_connector;
struct drm_device;
struct drm_display_mode;
@@ -171,6 +172,8 @@ enum tc_port_mode {
};
enum aux_ch {
+ AUX_CH_NONE = -1,
+
AUX_CH_A,
AUX_CH_B,
AUX_CH_C,
@@ -394,6 +397,7 @@ enum phy_fia {
((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \
(new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1))
+int intel_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc);
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
@@ -418,7 +422,6 @@ bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state);
bool intel_pipe_config_compare(const struct intel_crtc_state *current_config,
const struct intel_crtc_state *pipe_config,
bool fastset);
-void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state);
void intel_plane_destroy(struct drm_plane *plane);
void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
@@ -507,7 +510,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
-int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
+int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc);
unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
bool intel_plane_uses_fence(const struct intel_plane_state *plane_state);
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index fb8670aa2932..0b5509f268a7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -19,13 +19,12 @@
#include "intel_cdclk.h"
#include "intel_display_limits.h"
#include "intel_display_power.h"
-#include "intel_dmc.h"
#include "intel_dpll_mgr.h"
#include "intel_fbc.h"
#include "intel_global_state.h"
#include "intel_gmbus.h"
#include "intel_opregion.h"
-#include "intel_pm_types.h"
+#include "intel_wm_types.h"
struct drm_i915_private;
struct drm_property;
@@ -40,6 +39,7 @@ struct intel_cdclk_vals;
struct intel_color_funcs;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_dmc;
struct intel_dpll_funcs;
struct intel_dpll_mgr;
struct intel_fbdev;
@@ -85,6 +85,7 @@ struct intel_wm_funcs {
void (*optimize_watermarks)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
+ void (*get_hw_state)(struct drm_i915_private *i915);
};
struct intel_audio_state {
@@ -102,7 +103,7 @@ struct intel_audio {
u32 freq_cntrl;
/* current audio state for the audio component hooks */
- struct intel_audio_state state[I915_MAX_PIPES];
+ struct intel_audio_state state[I915_MAX_TRANSCODERS];
/* necessary resource sharing with HDMI LPE audio driver. */
struct {
@@ -182,6 +183,17 @@ struct intel_hotplug {
* blocked behind the non-DP one.
*/
struct workqueue_struct *dp_wq;
+
+ /*
+ * Flag to track whether long HPDs need not be processed
+ *
+ * Some panels generate long HPDs while staying connected to the port.
+ * This can cause issues with CI test results. In CI systems we
+ * don't expect the panels to be disconnected, so the long HPDs
+ * generated by such faulty panels can be ignored. This flag can be used
+ * as a cue to ignore the long HPDs and can be set / unset using debugfs.
+ */
+ bool ignore_long_hpd;
};
struct intel_vbt_data {
@@ -243,7 +255,7 @@ struct intel_wm {
struct g4x_wm_values g4x;
};
- u8 max_level;
+ u8 num_levels;
/*
* Should be held around atomic WM register writing; also
@@ -340,6 +352,11 @@ struct intel_display {
} dkl;
struct {
+ struct intel_dmc *dmc;
+ intel_wakeref_t wakeref;
+ } dmc;
+
+ struct {
/* VLV/CHV/BXT/GLK DSI MMIO register base address */
u32 mmio_base;
} dsi;
@@ -378,9 +395,15 @@ struct intel_display {
} gmbus;
struct {
- struct i915_hdcp_comp_master *master;
+ struct i915_hdcp_master *master;
bool comp_added;
+ /*
+ * HDCP message struct for allocation of memory which can be
+ * reused when sending messages to the GSC CS.
+ * This is only populated post Meteorlake.
+ */
+ struct intel_hdcp_gsc_message *hdcp_message;
/* Mutex to protect the above hdcp component related values. */
struct mutex comp_mutex;
} hdcp;
@@ -466,7 +489,6 @@ struct intel_display {
/* Grouping using named structs. Keep sorted. */
struct intel_audio audio;
- struct intel_dmc dmc;
struct intel_dpll dpll;
struct intel_fbc *fbc[I915_MAX_FBCS];
struct intel_frontbuffer_tracking fb_tracking;
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 7bcd90384a46..cc5026272558 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -8,6 +8,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
+#include "hsw_ips.h"
#include "i915_debugfs.h"
#include "i915_irq.h"
#include "i915_reg.h"
@@ -26,10 +27,9 @@
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_panel.h"
-#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sprite.h"
-#include "skl_watermark.h"
+#include "intel_wm.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -49,33 +49,6 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
return 0;
}
-static int i915_ips_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
-
- if (!HAS_IPS(dev_priv))
- return -ENODEV;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- seq_printf(m, "Enabled by kernel parameter: %s\n",
- str_yes_no(dev_priv->params.enable_ips));
-
- if (DISPLAY_VER(dev_priv) >= 8) {
- seq_puts(m, "Currently: unknown\n");
- } else {
- if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
- seq_puts(m, "Currently: enabled\n");
- else
- seq_puts(m, "Currently: disabled\n");
- }
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
static int i915_sr_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -169,269 +142,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
return 0;
}
-static int i915_psr_sink_status_show(struct seq_file *m, void *data)
-{
- u8 val;
- static const char * const sink_status[] = {
- "inactive",
- "transition to active, capture and display",
- "active, display from RFB",
- "active, capture and display on sink device timings",
- "transition to inactive, capture and display, timing re-sync",
- "reserved",
- "reserved",
- "sink internal error",
- };
- struct drm_connector *connector = m->private;
- struct intel_dp *intel_dp =
- intel_attached_dp(to_intel_connector(connector));
- int ret;
-
- if (!CAN_PSR(intel_dp)) {
- seq_puts(m, "PSR Unsupported\n");
- return -ENODEV;
- }
-
- if (connector->status != connector_status_connected)
- return -ENODEV;
-
- ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
-
- if (ret == 1) {
- const char *str = "unknown";
-
- val &= DP_PSR_SINK_STATE_MASK;
- if (val < ARRAY_SIZE(sink_status))
- str = sink_status[val];
- seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
- } else {
- return ret;
- }
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
-
-static void
-psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- const char *status = "unknown";
- u32 val, status_val;
-
- if (intel_dp->psr.psr2_enabled) {
- static const char * const live_status[] = {
- "IDLE",
- "CAPTURE",
- "CAPTURE_FS",
- "SLEEP",
- "BUFON_FW",
- "ML_UP",
- "SU_STANDBY",
- "FAST_SLEEP",
- "DEEP_SLEEP",
- "BUF_ON",
- "TG_ON"
- };
- val = intel_de_read(dev_priv,
- EDP_PSR2_STATUS(intel_dp->psr.transcoder));
- status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
- if (status_val < ARRAY_SIZE(live_status))
- status = live_status[status_val];
- } else {
- static const char * const live_status[] = {
- "IDLE",
- "SRDONACK",
- "SRDENT",
- "BUFOFF",
- "BUFON",
- "AUXACK",
- "SRDOFFACK",
- "SRDENT_ON",
- };
- val = intel_de_read(dev_priv,
- EDP_PSR_STATUS(intel_dp->psr.transcoder));
- status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
- EDP_PSR_STATUS_STATE_SHIFT;
- if (status_val < ARRAY_SIZE(live_status))
- status = live_status[status_val];
- }
-
- seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
-}
-
-static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_psr *psr = &intel_dp->psr;
- intel_wakeref_t wakeref;
- const char *status;
- bool enabled;
- u32 val;
-
- seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
- if (psr->sink_support)
- seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
- seq_puts(m, "\n");
-
- if (!psr->sink_support)
- return 0;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- mutex_lock(&psr->lock);
-
- if (psr->enabled)
- status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
- else
- status = "disabled";
- seq_printf(m, "PSR mode: %s\n", status);
-
- if (!psr->enabled) {
- seq_printf(m, "PSR sink not reliable: %s\n",
- str_yes_no(psr->sink_not_reliable));
-
- goto unlock;
- }
-
- if (psr->psr2_enabled) {
- val = intel_de_read(dev_priv,
- EDP_PSR2_CTL(intel_dp->psr.transcoder));
- enabled = val & EDP_PSR2_ENABLE;
- } else {
- val = intel_de_read(dev_priv,
- EDP_PSR_CTL(intel_dp->psr.transcoder));
- enabled = val & EDP_PSR_ENABLE;
- }
- seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
- str_enabled_disabled(enabled), val);
- psr_source_status(intel_dp, m);
- seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
- psr->busy_frontbuffer_bits);
-
- /*
- * SKL+ Perf counter is reset to 0 everytime DC state is entered
- */
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- val = intel_de_read(dev_priv,
- EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
- val &= EDP_PSR_PERF_CNT_MASK;
- seq_printf(m, "Performance counter: %u\n", val);
- }
-
- if (psr->debug & I915_PSR_DEBUG_IRQ) {
- seq_printf(m, "Last attempted entry at: %lld\n",
- psr->last_entry_attempt);
- seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
- }
-
- if (psr->psr2_enabled) {
- u32 su_frames_val[3];
- int frame;
-
- /*
- * Reading all 3 registers before hand to minimize crossing a
- * frame boundary between register reads
- */
- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
- val = intel_de_read(dev_priv,
- PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
- su_frames_val[frame / 3] = val;
- }
-
- seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
-
- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
- u32 su_blocks;
-
- su_blocks = su_frames_val[frame / 3] &
- PSR2_SU_STATUS_MASK(frame);
- su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
- seq_printf(m, "%d\t%d\n", frame, su_blocks);
- }
-
- seq_printf(m, "PSR2 selective fetch: %s\n",
- str_enabled_disabled(psr->psr2_sel_fetch_enabled));
- }
-
-unlock:
- mutex_unlock(&psr->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int i915_edp_psr_status(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_dp *intel_dp = NULL;
- struct intel_encoder *encoder;
-
- if (!HAS_PSR(dev_priv))
- return -ENODEV;
-
- /* Find the first EDP which supports PSR */
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
- intel_dp = enc_to_intel_dp(encoder);
- break;
- }
-
- if (!intel_dp)
- return -ENODEV;
-
- return intel_psr_status(m, intel_dp);
-}
-
-static int
-i915_edp_psr_debug_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- struct intel_encoder *encoder;
- intel_wakeref_t wakeref;
- int ret = -ENODEV;
-
- if (!HAS_PSR(dev_priv))
- return ret;
-
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- // TODO: split to each transcoder's PSR debug state
- ret = intel_psr_debug_set(intel_dp, val);
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- }
-
- return ret;
-}
-
-static int
-i915_edp_psr_debug_get(void *data, u64 *val)
-{
- struct drm_i915_private *dev_priv = data;
- struct intel_encoder *encoder;
-
- if (!HAS_PSR(dev_priv))
- return -ENODEV;
-
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- // TODO: split to each transcoder's PSR debug state
- *val = READ_ONCE(intel_dp->psr.debug);
- return 0;
- }
-
- return -ENODEV;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
- i915_edp_psr_debug_get, i915_edp_psr_debug_set,
- "%llu\n");
-
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -832,10 +542,10 @@ static const struct file_operations crtc_updates_fops = {
.write = crtc_updates_write
};
-static void crtc_updates_add(struct drm_crtc *crtc)
+static void crtc_updates_add(struct intel_crtc *crtc)
{
- debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry,
- to_intel_crtc(crtc), &crtc_updates_fops);
+ debugfs_create_file("i915_update_info", 0644, crtc->base.debugfs_entry,
+ crtc, &crtc_updates_fops);
}
#else
@@ -845,7 +555,7 @@ static void crtc_updates_info(struct seq_file *m,
{
}
-static void crtc_updates_add(struct drm_crtc *crtc)
+static void crtc_updates_add(struct intel_crtc *crtc)
{
}
#endif
@@ -1282,237 +992,6 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
-static void wm_latency_show(struct seq_file *m, const u16 wm[8])
-{
- struct drm_i915_private *dev_priv = m->private;
- int level;
- int num_levels;
-
- if (IS_CHERRYVIEW(dev_priv))
- num_levels = 3;
- else if (IS_VALLEYVIEW(dev_priv))
- num_levels = 1;
- else if (IS_G4X(dev_priv))
- num_levels = 3;
- else
- num_levels = ilk_wm_max_level(dev_priv) + 1;
-
- drm_modeset_lock_all(&dev_priv->drm);
-
- for (level = 0; level < num_levels; level++) {
- unsigned int latency = wm[level];
-
- /*
- * - WM1+ latency values in 0.5us units
- * - latencies are in us on gen9/vlv/chv
- */
- if (DISPLAY_VER(dev_priv) >= 9 ||
- IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) ||
- IS_G4X(dev_priv))
- latency *= 10;
- else if (level > 0)
- latency *= 5;
-
- seq_printf(m, "WM%d %u (%u.%u usec)\n",
- level, wm[level], latency / 10, latency % 10);
- }
-
- drm_modeset_unlock_all(&dev_priv->drm);
-}
-
-static int pri_wm_latency_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- const u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.pri_latency;
-
- wm_latency_show(m, latencies);
-
- return 0;
-}
-
-static int spr_wm_latency_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- const u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.spr_latency;
-
- wm_latency_show(m, latencies);
-
- return 0;
-}
-
-static int cur_wm_latency_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- const u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.cur_latency;
-
- wm_latency_show(m, latencies);
-
- return 0;
-}
-
-static int pri_wm_latency_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
- return -ENODEV;
-
- return single_open(file, pri_wm_latency_show, dev_priv);
-}
-
-static int spr_wm_latency_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (HAS_GMCH(dev_priv))
- return -ENODEV;
-
- return single_open(file, spr_wm_latency_show, dev_priv);
-}
-
-static int cur_wm_latency_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (HAS_GMCH(dev_priv))
- return -ENODEV;
-
- return single_open(file, cur_wm_latency_show, dev_priv);
-}
-
-static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp, u16 wm[8])
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 new[8] = { 0 };
- int num_levels;
- int level;
- int ret;
- char tmp[32];
-
- if (IS_CHERRYVIEW(dev_priv))
- num_levels = 3;
- else if (IS_VALLEYVIEW(dev_priv))
- num_levels = 1;
- else if (IS_G4X(dev_priv))
- num_levels = 3;
- else
- num_levels = ilk_wm_max_level(dev_priv) + 1;
-
- if (len >= sizeof(tmp))
- return -EINVAL;
-
- if (copy_from_user(tmp, ubuf, len))
- return -EFAULT;
-
- tmp[len] = '\0';
-
- ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
- &new[0], &new[1], &new[2], &new[3],
- &new[4], &new[5], &new[6], &new[7]);
- if (ret != num_levels)
- return -EINVAL;
-
- drm_modeset_lock_all(&dev_priv->drm);
-
- for (level = 0; level < num_levels; level++)
- wm[level] = new[level];
-
- drm_modeset_unlock_all(&dev_priv->drm);
-
- return len;
-}
-
-
-static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.pri_latency;
-
- return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.spr_latency;
-
- return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.cur_latency;
-
- return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static const struct file_operations i915_pri_wm_latency_fops = {
- .owner = THIS_MODULE,
- .open = pri_wm_latency_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = pri_wm_latency_write
-};
-
-static const struct file_operations i915_spr_wm_latency_fops = {
- .owner = THIS_MODULE,
- .open = spr_wm_latency_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = spr_wm_latency_write
-};
-
-static const struct file_operations i915_cur_wm_latency_fops = {
- .owner = THIS_MODULE,
- .open = cur_wm_latency_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = cur_wm_latency_write
-};
-
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
const char __user *ubuf,
@@ -1574,12 +1053,10 @@ static const struct file_operations i915_fifo_underrun_reset_ops = {
static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
- {"i915_ips_status", i915_ips_status, 0},
{"i915_sr_status", i915_sr_status, 0},
{"i915_opregion", i915_opregion, 0},
{"i915_vbt", i915_vbt, 0},
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
- {"i915_edp_psr_status", i915_edp_psr_status, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
@@ -1593,13 +1070,9 @@ static const struct {
const struct file_operations *fops;
} intel_display_debugfs_files[] = {
{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
- {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
- {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
- {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
{"i915_dp_test_data", &i915_displayport_test_data_fops},
{"i915_dp_test_type", &i915_displayport_test_type_fops},
{"i915_dp_test_active", &i915_displayport_test_active_fops},
- {"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
};
void intel_display_debugfs_register(struct drm_i915_private *i915)
@@ -1619,10 +1092,12 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
ARRAY_SIZE(intel_display_debugfs_list),
minor->debugfs_root, minor);
+ hsw_ips_debugfs_register(i915);
intel_dmc_debugfs_register(i915);
intel_fbc_debugfs_register(i915);
intel_hpd_debugfs_register(i915);
- skl_watermark_ipc_debugfs_register(i915);
+ intel_psr_debugfs_register(i915);
+ intel_wm_debugfs_register(i915);
}
static int i915_panel_show(struct seq_file *m, void *data)
@@ -1674,16 +1149,6 @@ out:
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
-static int i915_psr_status_show(struct seq_file *m, void *data)
-{
- struct drm_connector *connector = m->private;
- struct intel_dp *intel_dp =
- intel_attached_dp(to_intel_connector(connector));
-
- return intel_psr_status(m, intel_dp);
-}
-DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
-
static int i915_lpsp_capability_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
@@ -1901,7 +1366,7 @@ static const struct file_operations i915_dsc_bpc_fops = {
*/
static int i915_current_bpc_show(struct seq_file *m, void *data)
{
- struct intel_crtc *crtc = to_intel_crtc(m->private);
+ struct intel_crtc *crtc = m->private;
struct intel_crtc_state *crtc_state;
int ret;
@@ -1918,6 +1383,17 @@ static int i915_current_bpc_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_current_bpc);
+/* Pipe may differ from crtc index if pipes are fused off */
+static int intel_crtc_pipe_show(struct seq_file *m, void *unused)
+{
+ struct intel_crtc *crtc = m->private;
+
+ seq_printf(m, "%c\n", pipe_name(crtc->pipe));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(intel_crtc_pipe);
+
/**
* intel_connector_debugfs_add - add i915 specific connector debugfs files
* @connector: pointer to a registered drm_connector
@@ -1936,19 +1412,11 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector)
return;
intel_drrs_connector_debugfs_add(intel_connector);
+ intel_psr_connector_debugfs_add(intel_connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
debugfs_create_file("i915_panel_timings", S_IRUGO, root,
connector, &i915_panel_fops);
- debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
- connector, &i915_psr_sink_status_fops);
- }
-
- if (HAS_PSR(dev_priv) &&
- connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
- debugfs_create_file("i915_psr_status", 0444, root,
- connector, &i915_psr_status_fops);
- }
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
@@ -1983,15 +1451,19 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector)
*
* Failure to add debugfs entries should generally be ignored.
*/
-void intel_crtc_debugfs_add(struct drm_crtc *crtc)
+void intel_crtc_debugfs_add(struct intel_crtc *crtc)
{
- if (!crtc->debugfs_entry)
+ struct dentry *root = crtc->base.debugfs_entry;
+
+ if (!root)
return;
crtc_updates_add(crtc);
- intel_drrs_crtc_debugfs_add(to_intel_crtc(crtc));
- intel_fbc_crtc_debugfs_add(to_intel_crtc(crtc));
+ intel_drrs_crtc_debugfs_add(crtc);
+ intel_fbc_crtc_debugfs_add(crtc);
- debugfs_create_file("i915_current_bpc", 0444, crtc->debugfs_entry, crtc,
+ debugfs_create_file("i915_current_bpc", 0444, root, crtc,
&i915_current_bpc_fops);
+ debugfs_create_file("i915_pipe", 0444, root, crtc,
+ &intel_crtc_pipe_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
index d3a79c07c384..e1f479b7acd1 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
@@ -6,18 +6,18 @@
#ifndef __INTEL_DISPLAY_DEBUGFS_H__
#define __INTEL_DISPLAY_DEBUGFS_H__
-struct drm_crtc;
struct drm_i915_private;
struct intel_connector;
+struct intel_crtc;
#ifdef CONFIG_DEBUG_FS
void intel_display_debugfs_register(struct drm_i915_private *i915);
void intel_connector_debugfs_add(struct intel_connector *connector);
-void intel_crtc_debugfs_add(struct drm_crtc *crtc);
+void intel_crtc_debugfs_add(struct intel_crtc *crtc);
#else
static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {}
static inline void intel_connector_debugfs_add(struct intel_connector *connector) {}
-static inline void intel_crtc_debugfs_add(struct drm_crtc *crtc) {}
+static inline void intel_crtc_debugfs_add(struct intel_crtc *crtc) {}
#endif
#endif /* __INTEL_DISPLAY_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 1a23ecd4623a..f86060195987 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -264,9 +264,10 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
}
static u32
-sanitize_target_dc_state(struct drm_i915_private *dev_priv,
+sanitize_target_dc_state(struct drm_i915_private *i915,
u32 target_dc_state)
{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
static const u32 states[] = {
DC_STATE_EN_UPTO_DC6,
DC_STATE_EN_UPTO_DC5,
@@ -279,7 +280,7 @@ sanitize_target_dc_state(struct drm_i915_private *dev_priv,
if (target_dc_state != states[i])
continue;
- if (dev_priv->display.dmc.allowed_dc_mask & target_dc_state)
+ if (power_domains->allowed_dc_mask & target_dc_state)
break;
target_dc_state = states[i + 1];
@@ -312,7 +313,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
state = sanitize_target_dc_state(dev_priv, state);
- if (state == dev_priv->display.dmc.target_dc_state)
+ if (state == power_domains->target_dc_state)
goto unlock;
dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
@@ -323,7 +324,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
if (!dc_off_enabled)
intel_power_well_enable(dev_priv, power_well);
- dev_priv->display.dmc.target_dc_state = state;
+ power_domains->target_dc_state = state;
if (!dc_off_enabled)
intel_power_well_disable(dev_priv, power_well);
@@ -992,10 +993,10 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
dev_priv->params.disable_power_well =
sanitize_disable_power_well_option(dev_priv,
dev_priv->params.disable_power_well);
- dev_priv->display.dmc.allowed_dc_mask =
+ power_domains->allowed_dc_mask =
get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
- dev_priv->display.dmc.target_dc_state =
+ power_domains->target_dc_state =
sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
mutex_init(&power_domains->lock);
@@ -1260,9 +1261,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
if (allow_power_down) {
- val = intel_de_read(dev_priv, LCPLL_CTL);
- val |= LCPLL_POWER_DOWN_ALLOW;
- intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_rmw(dev_priv, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW);
intel_de_posting_read(dev_priv, LCPLL_CTL);
}
}
@@ -1306,9 +1305,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
if (val & LCPLL_CD_SOURCE_FCLK) {
- val = intel_de_read(dev_priv, LCPLL_CTL);
- val &= ~LCPLL_CD_SOURCE_FCLK;
- intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_rmw(dev_priv, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);
if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
@@ -1347,15 +1344,11 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
*/
static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
- u32 val;
-
drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
- if (HAS_PCH_LPT_LP(dev_priv)) {
- val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
- val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
- intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
- }
+ if (HAS_PCH_LPT_LP(dev_priv))
+ intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ PCH_LP_PARTITION_LEVEL_DISABLE, 0);
lpt_disable_clkout_dp(dev_priv);
hsw_disable_lcpll(dev_priv, true, true);
@@ -1363,25 +1356,21 @@ static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
- u32 val;
-
drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
hsw_restore_lcpll(dev_priv);
intel_init_pch_refclk(dev_priv);
- if (HAS_PCH_LPT_LP(dev_priv)) {
- val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
- val |= PCH_LP_PARTITION_LEVEL_DISABLE;
- intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
- }
+ if (HAS_PCH_LPT_LP(dev_priv))
+ intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ 0, PCH_LP_PARTITION_LEVEL_DISABLE);
}
static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
bool enable)
{
i915_reg_t reg;
- u32 reset_bits, val;
+ u32 reset_bits;
if (IS_IVYBRIDGE(dev_priv)) {
reg = GEN7_MSG_CTL;
@@ -1394,14 +1383,7 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
if (DISPLAY_VER(dev_priv) >= 14)
reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;
- val = intel_de_read(dev_priv, reg);
-
- if (enable)
- val |= reset_bits;
- else
- val &= ~reset_bits;
-
- intel_de_write(dev_priv, reg, val);
+ intel_de_rmw(dev_priv, reg, reset_bits, enable ? reset_bits : 0);
}
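Most of the register updates touched in this file, and in intel_display_power_well.c further below, swap an open-coded read/modify/write sequence for intel_de_rmw(). A minimal sketch of the semantics assumed by those hunks (the real helper lives in intel_de.h and returns the value read back):

static u32 sketch_de_rmw(struct drm_i915_private *i915, i915_reg_t reg,
			 u32 clear, u32 set)
{
	u32 old = intel_de_read(i915, reg);

	/* Clear the requested bits first, then set the new ones. */
	intel_de_write(i915, reg, (old & ~clear) | set);

	return old;
}

So intel_de_rmw(dev_priv, reg, bits, 0) clears bits, intel_de_rmw(dev_priv, reg, 0, bits) sets them, and the conditional case in the intel_pch_reset_handshake() hunk above clears reset_bits unconditionally and sets them back only when enable is true.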
static void skl_display_core_init(struct drm_i915_private *dev_priv,
@@ -1580,10 +1562,8 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
return;
if (IS_ALDERLAKE_S(dev_priv) ||
- IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
- IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
- IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
- /* Wa_1409767108:tgl,dg1,adl-s */
+ IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ /* Wa_1409767108 */
table = wa_1409767108_buddy_page_masks;
else
table = tgl_buddy_page_masks;
@@ -1618,7 +1598,6 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
{
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
- u32 val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -1646,6 +1625,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
intel_power_well_enable(dev_priv, well);
mutex_unlock(&power_domains->lock);
+ if (DISPLAY_VER(dev_priv) == 14)
+ intel_de_rmw(dev_priv, DC_STATE_EN,
+ HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0);
+
/* 4. Enable CDCLK. */
intel_cdclk_init_hw(dev_priv);
@@ -1670,11 +1653,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
intel_dmc_load_program(dev_priv);
/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
- if (DISPLAY_VER(dev_priv) >= 12) {
- val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
- DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
- intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0, val);
- }
+ if (DISPLAY_VER(dev_priv) >= 12)
+ intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0,
+ DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
+ DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);
/* Wa_14011503030:xelpd */
if (DISPLAY_VER(dev_priv) >= 13)
@@ -1700,6 +1682,10 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
/* 3. Disable CD clock */
intel_cdclk_uninit_hw(dev_priv);
+ if (DISPLAY_VER(dev_priv) == 14)
+ intel_de_rmw(dev_priv, DC_STATE_EN, 0,
+ HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH);
+
/*
* 4. Disable Power Well 1 (PG1).
* The AUX IO power wells are toggled on demand, so they are already
@@ -2055,7 +2041,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
* resources as required and also enable deeper system power states
* that would be blocked if the firmware was inactive.
*/
- if (!(i915->display.dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
+ if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) &&
suspend_mode == I915_DRM_SUSPEND_IDLE &&
intel_dmc_has_payload(i915)) {
intel_display_power_flush_work(i915);
@@ -2244,22 +2230,22 @@ void intel_display_power_suspend(struct drm_i915_private *i915)
void intel_display_power_resume(struct drm_i915_private *i915)
{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
+
if (DISPLAY_VER(i915) >= 11) {
bxt_disable_dc9(i915);
icl_display_core_init(i915, true);
if (intel_dmc_has_payload(i915)) {
- if (i915->display.dmc.allowed_dc_mask &
- DC_STATE_EN_UPTO_DC6)
+ if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
skl_enable_dc6(i915);
- else if (i915->display.dmc.allowed_dc_mask &
- DC_STATE_EN_UPTO_DC5)
+ else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
gen9_enable_dc5(i915);
}
} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
bxt_disable_dc9(i915);
bxt_display_core_init(i915, true);
if (intel_dmc_has_payload(i915) &&
- (i915->display.dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
+ (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
gen9_enable_dc5(i915);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_disable_pc8(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 2154d900b1aa..8e96be8e6330 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -137,6 +137,10 @@ struct i915_power_domains {
bool display_core_suspended;
int power_well_count;
+ u32 dc_state;
+ u32 target_dc_state;
+ u32 allowed_dc_mask;
+
intel_wakeref_t init_wakeref;
intel_wakeref_t disable_wakeref;
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 8710dd41ffd4..1676df1dc066 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -333,7 +333,6 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- u32 val;
if (power_well->desc->has_fuses) {
enum skl_power_gate pg;
@@ -356,9 +355,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
}
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+ intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
hsw_wait_for_power_well_enable(dev_priv, power_well, false);
@@ -380,17 +377,27 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- u32 val;
hsw_power_well_pre_disable(dev_priv,
power_well->desc->irq_pipe_mask);
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+ intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
+static bool intel_port_is_edp(struct drm_i915_private *i915, enum port port)
+{
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(&i915->drm, encoder) {
+ if (encoder->type == INTEL_OUTPUT_EDP &&
+ encoder->port == port)
+ return true;
+ }
+
+ return false;
+}
+
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
@@ -398,29 +405,22 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
- u32 val;
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+ intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
- if (DISPLAY_VER(dev_priv) < 12) {
- val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
- intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
- val | ICL_LANE_ENABLE_AUX);
- }
+ if (DISPLAY_VER(dev_priv) < 12)
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
+ 0, ICL_LANE_ENABLE_AUX);
hsw_wait_for_power_well_enable(dev_priv, power_well, false);
/* Display WA #1178: icl */
if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
- !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
- val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
- val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
- intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
- }
+ !intel_port_is_edp(dev_priv, (enum port)phy))
+ intel_de_rmw(dev_priv, ICL_AUX_ANAOVRD1(pw_idx),
+ 0, ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS);
}
static void
@@ -430,17 +430,12 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
- u32 val;
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
- intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
- val & ~ICL_LANE_ENABLE_AUX);
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), ICL_LANE_ENABLE_AUX, 0);
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+ intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
@@ -502,19 +497,15 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
bool is_tbt = power_well->desc->is_tc_tbt;
bool timeout_expected;
- u32 val;
icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
- val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
- val &= ~DP_AUX_CH_CTL_TBT_IO;
- if (is_tbt)
- val |= DP_AUX_CH_CTL_TBT_IO;
- intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
+ intel_de_rmw(dev_priv, DP_AUX_CH_CTL(aux_ch),
+ DP_AUX_CH_CTL_TBT_IO, is_tbt ? DP_AUX_CH_CTL_TBT_IO : 0);
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val | HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx));
+ intel_de_rmw(dev_priv, regs->driver,
+ 0,
+ HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx));
/*
* An AUX timeout is expected if the TBT DP tunnel is down,
@@ -700,19 +691,20 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
return mask;
}
-void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
+void gen9_sanitize_dc_state(struct drm_i915_private *i915)
{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
u32 val;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(i915))
return;
- val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
+ val = intel_de_read(i915, DC_STATE_EN) & gen9_dc_mask(i915);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Resetting DC state tracking from %02x to %02x\n",
- dev_priv->display.dmc.dc_state, val);
- dev_priv->display.dmc.dc_state = val;
+ power_domains->dc_state, val);
+ power_domains->dc_state = val;
}
/**
@@ -740,6 +732,7 @@ void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
*/
void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
u32 val;
u32 mask;
@@ -747,8 +740,8 @@ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm,
- state & ~dev_priv->display.dmc.allowed_dc_mask))
- state &= dev_priv->display.dmc.allowed_dc_mask;
+ state & ~power_domains->allowed_dc_mask))
+ state &= power_domains->allowed_dc_mask;
val = intel_de_read(dev_priv, DC_STATE_EN);
mask = gen9_dc_mask(dev_priv);
@@ -756,16 +749,16 @@ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
val & mask, state);
/* Check if DMC is ignoring our DC state requests */
- if ((val & mask) != dev_priv->display.dmc.dc_state)
+ if ((val & mask) != power_domains->dc_state)
drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
- dev_priv->display.dmc.dc_state, val & mask);
+ power_domains->dc_state, val & mask);
val &= ~mask;
val |= state;
gen9_write_dc_state(dev_priv, val);
- dev_priv->display.dmc.dc_state = val & mask;
+ power_domains->dc_state = val & mask;
}
static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
@@ -776,12 +769,8 @@ static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
{
- u32 val;
-
drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
- val = intel_de_read(dev_priv, DC_STATE_EN);
- val &= ~DC_STATE_DC3CO_STATUS;
- intel_de_write(dev_priv, DC_STATE_EN, val);
+ intel_de_rmw(dev_priv, DC_STATE_EN, DC_STATE_DC3CO_STATUS, 0);
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/*
* Delay of 200us DC3CO Exit time B.Spec 49196
@@ -820,8 +809,8 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv)
/* Wa Display #1183: skl,kbl,cfl */
if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
- intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ 0, SKL_SELECT_ALTERNATE_DC_EXIT);
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
@@ -847,8 +836,8 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
/* Wa Display #1183: skl,kbl,cfl */
if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
- intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ 0, SKL_SELECT_ALTERNATE_DC_EXIT);
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
@@ -957,9 +946,10 @@ static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct intel_cdclk_config cdclk_config = {};
- if (dev_priv->display.dmc.target_dc_state == DC_STATE_EN_DC3CO) {
+ if (power_domains->target_dc_state == DC_STATE_EN_DC3CO) {
tgl_disable_dc3co(dev_priv);
return;
}
@@ -998,10 +988,12 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+
if (!intel_dmc_has_payload(dev_priv))
return;
- switch (dev_priv->display.dmc.target_dc_state) {
+ switch (power_domains->target_dc_state) {
case DC_STATE_EN_DC3CO:
tgl_enable_dc3co(dev_priv);
break;
@@ -1033,9 +1025,9 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
+ if ((intel_de_read(dev_priv, TRANSCONF(PIPE_A)) & TRANSCONF_ENABLE) == 0)
i830_enable_pipe(dev_priv, PIPE_A);
- if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
+ if ((intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE) == 0)
i830_enable_pipe(dev_priv, PIPE_B);
}
@@ -1049,8 +1041,8 @@ static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
- intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+ return intel_de_read(dev_priv, TRANSCONF(PIPE_A)) & TRANSCONF_ENABLE &&
+ intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE;
}
static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -1149,18 +1141,14 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
- u32 val;
-
/*
* On driver load, a pipe may be active and driving a DSI display.
* Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
* (and never recovering) in this case. intel_dsi_post_disable() will
* clear it when we turn off the display.
*/
- val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
- val &= DPOUNIT_CLOCK_GATE_DISABLE;
- val |= VRHUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
+ intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
+ ~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE);
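	/*
	 * The clear mask above is the inverted DPOUNIT bit: assuming
	 * intel_de_rmw(clear, set) semantics, every bit except
	 * DPOUNIT_CLOCK_GATE_DISABLE gets cleared and
	 * VRHUNIT_CLOCK_GATE_DISABLE is then set, matching the removed
	 * "val &= DPOUNIT_CLOCK_GATE_DISABLE; val |= VRHUNIT_CLOCK_GATE_DISABLE"
	 * sequence rather than the usual clear-these-bits pattern.
	 */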
/*
* Disable trickle feed and enable pnd deadline calculation
@@ -1276,8 +1264,7 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
* both PLLs disabled, or we risk losing DPIO and PLL
* synchronization.
*/
- intel_de_write(dev_priv, DPIO_CTL,
- intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
+ intel_de_rmw(dev_priv, DPIO_CTL, 0, DPIO_CMNRST);
}
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
@@ -1289,8 +1276,7 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
assert_pll_disabled(dev_priv, pipe);
/* Assert common reset */
- intel_de_write(dev_priv, DPIO_CTL,
- intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
+ intel_de_rmw(dev_priv, DPIO_CTL, DPIO_CMNRST, 0);
vlv_set_power_well(dev_priv, power_well, false);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_reg_defs.h b/drivers/gpu/drm/i915/display/intel_display_reg_defs.h
index 02605418ff08..755c1ea8225c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reg_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_reg_defs.h
@@ -13,7 +13,7 @@
#define VLV_DISPLAY_BASE 0x180000
/*
- * Named helper wrappers around _PICK_EVEN() and _PICK().
+ * Named helper wrappers around _PICK_EVEN() and _PICK_EVEN_2RANGES().
*/
#define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b)
#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b)
@@ -29,12 +29,8 @@
#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
#define _MMIO_PHY(phy, a, b) _MMIO(_PHY(phy, a, b))
-#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
-
-#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
-#define _MMIO_PLL3(pll, ...) _MMIO(_PICK(pll, __VA_ARGS__))
+#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK_EVEN_2RANGES(pipe, 1, a, a, b, c))
+#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK_EVEN_2RANGES(pipe, 1, a, a, b, c))
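/*
 * Assuming the _PICK_EVEN_2RANGES() definition in i915_reg_defs.h, the
 * replacement above keeps the old three-entry behaviour: index 0 hits the
 * first one-entry range (a, a) and yields a, while index >= 1 falls into
 * the second range and evaluates _PICK_EVEN(index - 1, b, c), i.e.
 * b + (index - 1) * (c - b). Indices 0..2 therefore still map to a, b, c,
 * just without the compound-literal array that the removed _PICK() variant
 * built.
 */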
/*
* Device info offset array based helpers for groups of registers with unevenly
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.c b/drivers/gpu/drm/i915/display/intel_display_rps.c
new file mode 100644
index 000000000000..918d0327169a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_vblank.h>
+
+#include "gt/intel_rps.h"
+#include "i915_drv.h"
+#include "intel_display_rps.h"
+#include "intel_display_types.h"
+
+struct wait_rps_boost {
+ struct wait_queue_entry wait;
+
+ struct drm_crtc *crtc;
+ struct i915_request *request;
+};
+
+static int do_rps_boost(struct wait_queue_entry *_wait,
+ unsigned mode, int sync, void *key)
+{
+ struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
+ struct i915_request *rq = wait->request;
+
+ /*
+ * If we missed the vblank, but the request is already running it
+ * is reasonable to assume that it will complete before the next
+ * vblank without our intervention, so leave RPS alone.
+ */
+ if (!i915_request_started(rq))
+ intel_rps_boost(rq);
+ i915_request_put(rq);
+
+ drm_crtc_vblank_put(wait->crtc);
+
+ list_del(&wait->wait.entry);
+ kfree(wait);
+ return 1;
+}
+
+void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
+ struct dma_fence *fence)
+{
+ struct wait_rps_boost *wait;
+
+ if (!dma_fence_is_i915(fence))
+ return;
+
+ if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
+ return;
+
+ if (drm_crtc_vblank_get(crtc))
+ return;
+
+ wait = kmalloc(sizeof(*wait), GFP_KERNEL);
+ if (!wait) {
+ drm_crtc_vblank_put(crtc);
+ return;
+ }
+
+ wait->request = to_request(dma_fence_get(fence));
+ wait->crtc = crtc;
+
+ wait->wait.func = do_rps_boost;
+ wait->wait.flags = 0;
+
+ add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
+}
+
+void intel_display_rps_mark_interactive(struct drm_i915_private *i915,
+ struct intel_atomic_state *state,
+ bool interactive)
+{
+ if (state->rps_interactive == interactive)
+ return;
+
+ intel_rps_mark_interactive(&to_gt(i915)->rps, interactive);
+ state->rps_interactive = interactive;
+}
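The helper above boosts the GPU frequency when a flip risks missing its vblank: it takes a vblank reference, parks a wait entry on the CRTC's vblank waitqueue and, once the vblank fires, boosts only if the fence's request has not started yet. A minimal usage sketch, assuming a caller that already holds the target CRTC and the plane's pending i915 fence (the function name and call site below are illustrative, not the actual users of this API):

static void sketch_plane_flip_prepare(struct intel_plane_state *plane_state)
{
	struct drm_crtc *crtc = plane_state->hw.crtc;
	struct dma_fence *fence = plane_state->uapi.fence;

	/* No-op for non-i915 fences and for DISPLAY_VER < 6. */
	if (crtc && fence)
		intel_display_rps_boost_after_vblank(crtc, fence);
}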
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.h b/drivers/gpu/drm/i915/display/intel_display_rps.h
new file mode 100644
index 000000000000..e19009c2371a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_RPS_H__
+#define __INTEL_DISPLAY_RPS_H__
+
+#include <linux/types.h>
+
+struct dma_fence;
+struct drm_crtc;
+struct drm_i915_private;
+struct intel_atomic_state;
+
+void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
+ struct dma_fence *fence);
+void intel_display_rps_mark_interactive(struct drm_i915_private *i915,
+ struct intel_atomic_state *state,
+ bool interactive);
+
+#endif /* __INTEL_DISPLAY_RPS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 54c517ca9632..ab146b5b68bd 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -43,7 +43,7 @@
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
-#include <drm/i915_mei_hdcp_interface.h>
+#include <drm/i915_hdcp_interface.h>
#include <media/cec-notifier.h>
#include "i915_vma.h"
@@ -53,7 +53,7 @@
#include "intel_display_limits.h"
#include "intel_display_power.h"
#include "intel_dpll_mgr.h"
-#include "intel_pm_types.h"
+#include "intel_wm_types.h"
struct drm_printer;
struct __intel_global_objs_state;
@@ -255,6 +255,11 @@ struct intel_encoder {
* Returns whether the port clock is enabled or not.
*/
bool (*is_clock_enabled)(struct intel_encoder *encoder);
+ /*
+ * Returns the PLL type the port uses.
+ */
+ enum icl_port_dpll_id (*port_pll_type)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
const struct intel_ddi_buf_trans *(*get_buf_trans)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);
@@ -326,6 +331,7 @@ struct intel_vbt_panel_data {
struct {
u16 pwm_freq_hz;
u16 brightness_precision_bits;
+ u16 hdr_dpcd_refresh_timeout;
bool present;
bool active_low_pwm;
u8 min_brightness; /* min_brightness/255 of max */
@@ -1249,6 +1255,9 @@ struct intel_crtc_state {
/* bitmask of planes that will be updated during the commit */
u8 update_planes;
+ /* bitmask of planes with async flip active */
+ u8 async_flip_planes;
+
u8 framestart_delay; /* 1-4 */
u8 msa_timing_delay; /* 0-3 */
@@ -1502,17 +1511,6 @@ struct intel_watermark_params {
u8 cacheline_size;
};
-struct cxsr_latency {
- bool is_desktop : 1;
- bool is_ddr3 : 1;
- u16 fsb_freq;
- u16 mem_freq;
- u16 display_sr;
- u16 display_hpll_disable;
- u16 cursor_sr;
- u16 cursor_hpll_disable;
-};
-
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, uapi)
@@ -1631,6 +1629,8 @@ struct intel_psr {
bool psr2_sel_fetch_cff_enabled;
bool req_psr2_sdp_prior_scanline;
u8 sink_sync_latency;
+ u8 io_wake_lines;
+ u8 fast_wake_lines;
ktime_t last_entry_attempt;
ktime_t last_exit;
bool sink_not_reliable;
@@ -1788,6 +1788,7 @@ struct intel_digital_port {
bool tc_legacy_port:1;
char tc_port_name[8];
enum tc_port_mode tc_mode;
+ enum tc_port_mode tc_init_mode;
enum phy_fia tc_phy_fia;
u8 tc_phy_fia_idx;
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 257aa2b7cf20..8a88de67ff0a 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -38,6 +38,39 @@
* low-power state and comes back to normal.
*/
+enum intel_dmc_id {
+ DMC_FW_MAIN = 0,
+ DMC_FW_PIPEA,
+ DMC_FW_PIPEB,
+ DMC_FW_PIPEC,
+ DMC_FW_PIPED,
+ DMC_FW_MAX
+};
+
+struct intel_dmc {
+ struct drm_i915_private *i915;
+ struct work_struct work;
+ const char *fw_path;
+ u32 max_fw_size; /* bytes */
+ u32 version;
+ struct dmc_fw_info {
+ u32 mmio_count;
+ i915_reg_t mmioaddr[20];
+ u32 mmiodata[20];
+ u32 dmc_offset;
+ u32 start_mmioaddr;
+ u32 dmc_fw_size; /* dwords */
+ u32 *payload;
+ bool present;
+ } dmc_info[DMC_FW_MAX];
+};
+
+/* Note: This may be NULL. */
+static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915)
+{
+ return i915->display.dmc.dmc;
+}
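/*
 * With struct intel_dmc now allocated dynamically and private to this file,
 * callers are expected to go through the NULL-tolerant accessor above. A
 * minimal sketch of that pattern, assuming a hypothetical caller that wants
 * a field of the firmware description:
 */
static u32 sketch_dmc_fw_version(struct drm_i915_private *i915)
{
	struct intel_dmc *dmc = i915_to_dmc(i915);

	/* NULL when DMC is unsupported or not (yet) initialized. */
	return dmc ? dmc->version : 0;
}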
+
#define DMC_VERSION(major, minor) ((major) << 16 | (minor))
#define DMC_VERSION_MAJOR(version) ((version) >> 16)
#define DMC_VERSION_MINOR(version) ((version) & 0xffff)
@@ -56,10 +89,13 @@
__stringify(major) "_" \
__stringify(minor) ".bin"
+#define XELPDP_DMC_MAX_FW_SIZE 0x7000
#define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000
-
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
+#define MTL_DMC_PATH DMC_PATH(mtl)
+MODULE_FIRMWARE(MTL_DMC_PATH);
+
#define DG2_DMC_PATH DMC_LEGACY_PATH(dg2, 2, 08)
MODULE_FIRMWARE(DG2_DMC_PATH);
@@ -249,9 +285,19 @@ struct stepping_info {
char substepping;
};
-static bool has_dmc_id_fw(struct drm_i915_private *i915, int dmc_id)
+#define for_each_dmc_id(__dmc_id) \
+ for ((__dmc_id) = DMC_FW_MAIN; (__dmc_id) < DMC_FW_MAX; (__dmc_id)++)
+
+static bool is_valid_dmc_id(enum intel_dmc_id dmc_id)
{
- return i915->display.dmc.dmc_info[dmc_id].payload;
+ return dmc_id >= DMC_FW_MAIN && dmc_id < DMC_FW_MAX;
+}
+
+static bool has_dmc_id_fw(struct drm_i915_private *i915, enum intel_dmc_id dmc_id)
+{
+ struct intel_dmc *dmc = i915_to_dmc(i915);
+
+ return dmc && dmc->dmc_info[dmc_id].payload;
}
bool intel_dmc_has_payload(struct drm_i915_private *i915)
@@ -270,12 +316,12 @@ intel_get_stepping_info(struct drm_i915_private *i915,
return si;
}
-static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
+static void gen9_set_dc_state_debugmask(struct drm_i915_private *i915)
{
/* The below bit doesn't need to be cleared ever afterwards */
- intel_de_rmw(dev_priv, DC_STATE_DEBUG, 0,
+ intel_de_rmw(i915, DC_STATE_DEBUG, 0,
DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP);
- intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
+ intel_de_posting_read(i915, DC_STATE_DEBUG);
}
static void disable_event_handler(struct drm_i915_private *i915,
@@ -315,26 +361,23 @@ disable_flip_queue_event(struct drm_i915_private *i915,
}
static bool
-get_flip_queue_event_regs(struct drm_i915_private *i915, int dmc_id,
+get_flip_queue_event_regs(struct drm_i915_private *i915, enum intel_dmc_id dmc_id,
i915_reg_t *ctl_reg, i915_reg_t *htp_reg)
{
- switch (dmc_id) {
- case DMC_FW_MAIN:
+ if (dmc_id == DMC_FW_MAIN) {
if (DISPLAY_VER(i915) == 12) {
*ctl_reg = DMC_EVT_CTL(i915, dmc_id, 3);
*htp_reg = DMC_EVT_HTP(i915, dmc_id, 3);
return true;
}
- break;
- case DMC_FW_PIPEA ... DMC_FW_PIPED:
+ } else if (dmc_id >= DMC_FW_PIPEA && dmc_id <= DMC_FW_PIPED) {
if (IS_DG2(i915)) {
*ctl_reg = DMC_EVT_CTL(i915, dmc_id, 2);
*htp_reg = DMC_EVT_HTP(i915, dmc_id, 2);
return true;
}
- break;
}
return false;
@@ -343,13 +386,13 @@ get_flip_queue_event_regs(struct drm_i915_private *i915, int dmc_id,
static void
disable_all_flip_queue_events(struct drm_i915_private *i915)
{
- int dmc_id;
+ enum intel_dmc_id dmc_id;
/* TODO: check if the following applies to all D13+ platforms. */
if (!IS_DG2(i915) && !IS_TIGERLAKE(i915))
return;
- for (dmc_id = 0; dmc_id < DMC_FW_MAX; dmc_id++) {
+ for_each_dmc_id(dmc_id) {
i915_reg_t ctl_reg;
i915_reg_t htp_reg;
@@ -365,34 +408,31 @@ disable_all_flip_queue_events(struct drm_i915_private *i915)
static void disable_all_event_handlers(struct drm_i915_private *i915)
{
- int id;
+ enum intel_dmc_id dmc_id;
/* TODO: disable the event handlers on pre-GEN12 platforms as well */
if (DISPLAY_VER(i915) < 12)
return;
- for (id = DMC_FW_MAIN; id < DMC_FW_MAX; id++) {
+ for_each_dmc_id(dmc_id) {
int handler;
- if (!has_dmc_id_fw(i915, id))
+ if (!has_dmc_id_fw(i915, dmc_id))
continue;
for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
disable_event_handler(i915,
- DMC_EVT_CTL(i915, id, handler),
- DMC_EVT_HTP(i915, id, handler));
+ DMC_EVT_CTL(i915, dmc_id, handler),
+ DMC_EVT_HTP(i915, dmc_id, handler));
}
}
-static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
+static void adlp_pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
{
enum pipe pipe;
- if (DISPLAY_VER(i915) < 13)
- return;
-
/*
- * Wa_16015201720:adl-p,dg2, mtl
+ * Wa_16015201720:adl-p,dg2
* The WA requires clock gating to be disabled all the time
* for pipe A and B.
* For pipe C and D clock gating needs to be disabled only
@@ -408,9 +448,30 @@ static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
PIPEDMC_GATING_DIS, 0);
}
+static void mtl_pipedmc_clock_gating_wa(struct drm_i915_private *i915)
+{
+ /*
+ * Wa_16015201720
+ * The WA requires clock gating to be disabled all the time
+ * for pipe A and B.
+ */
+ intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0,
+ MTL_PIPEDMC_GATING_DIS_A | MTL_PIPEDMC_GATING_DIS_B);
+}
+
+static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
+{
+ if (DISPLAY_VER(i915) >= 14 && enable)
+ mtl_pipedmc_clock_gating_wa(i915);
+ else if (DISPLAY_VER(i915) == 13)
+ adlp_pipedmc_clock_gating_wa(i915, enable);
+}
+
void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe)
{
- if (!has_dmc_id_fw(i915, PIPE_TO_DMC_ID(pipe)))
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
+
+ if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id))
return;
if (DISPLAY_VER(i915) >= 14)
@@ -421,7 +482,9 @@ void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe)
void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe)
{
- if (!has_dmc_id_fw(i915, PIPE_TO_DMC_ID(pipe)))
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
+
+ if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id))
return;
if (DISPLAY_VER(i915) >= 14)
@@ -432,57 +495,59 @@ void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe)
/**
* intel_dmc_load_program() - write the firmware from memory to register.
- * @dev_priv: i915 drm device.
+ * @i915: i915 drm device.
*
* DMC firmware is read from a .bin file and kept in internal memory one time.
* Everytime display comes back from low power state this function is called to
* copy the firmware from internal memory to registers.
*/
-void intel_dmc_load_program(struct drm_i915_private *dev_priv)
+void intel_dmc_load_program(struct drm_i915_private *i915)
{
- struct intel_dmc *dmc = &dev_priv->display.dmc;
- u32 id, i;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct intel_dmc *dmc = i915_to_dmc(i915);
+ enum intel_dmc_id dmc_id;
+ u32 i;
- if (!intel_dmc_has_payload(dev_priv))
+ if (!intel_dmc_has_payload(i915))
return;
- pipedmc_clock_gating_wa(dev_priv, true);
+ pipedmc_clock_gating_wa(i915, true);
- disable_all_event_handlers(dev_priv);
+ disable_all_event_handlers(i915);
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+ assert_rpm_wakelock_held(&i915->runtime_pm);
preempt_disable();
- for (id = 0; id < DMC_FW_MAX; id++) {
- for (i = 0; i < dmc->dmc_info[id].dmc_fw_size; i++) {
- intel_de_write_fw(dev_priv,
- DMC_PROGRAM(dmc->dmc_info[id].start_mmioaddr, i),
- dmc->dmc_info[id].payload[i]);
+ for_each_dmc_id(dmc_id) {
+ for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) {
+ intel_de_write_fw(i915,
+ DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i),
+ dmc->dmc_info[dmc_id].payload[i]);
}
}
preempt_enable();
- for (id = 0; id < DMC_FW_MAX; id++) {
- for (i = 0; i < dmc->dmc_info[id].mmio_count; i++) {
- intel_de_write(dev_priv, dmc->dmc_info[id].mmioaddr[i],
- dmc->dmc_info[id].mmiodata[i]);
+ for_each_dmc_id(dmc_id) {
+ for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
+ intel_de_write(i915, dmc->dmc_info[dmc_id].mmioaddr[i],
+ dmc->dmc_info[dmc_id].mmiodata[i]);
}
}
- dev_priv->display.dmc.dc_state = 0;
+ power_domains->dc_state = 0;
- gen9_set_dc_state_debugmask(dev_priv);
+ gen9_set_dc_state_debugmask(i915);
/*
* Flip queue events need to be disabled before enabling DC5/6.
* i915 doesn't use the flip queue feature, so disable it already
* here.
*/
- disable_all_flip_queue_events(dev_priv);
+ disable_all_flip_queue_events(i915);
- pipedmc_clock_gating_wa(dev_priv, false);
+ pipedmc_clock_gating_wa(i915, false);
}
/**
@@ -504,8 +569,11 @@ void intel_dmc_disable_program(struct drm_i915_private *i915)
void assert_dmc_loaded(struct drm_i915_private *i915)
{
- drm_WARN_ONCE(&i915->drm,
- !intel_de_read(i915, DMC_PROGRAM(i915->display.dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
+ struct intel_dmc *dmc = i915_to_dmc(i915);
+
+ drm_WARN_ONCE(&i915->drm, !dmc, "DMC not initialized\n");
+ drm_WARN_ONCE(&i915->drm, dmc &&
+ !intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
"DMC program storage start is NULL\n");
drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE),
"DMC SSP Base Not fine\n");
@@ -540,15 +608,15 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
const struct stepping_info *si,
u8 package_ver)
{
- unsigned int i, id;
-
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ struct drm_i915_private *i915 = dmc->i915;
+ enum intel_dmc_id dmc_id;
+ unsigned int i;
for (i = 0; i < num_entries; i++) {
- id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;
+ dmc_id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;
- if (id >= DMC_FW_MAX) {
- drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", id);
+ if (!is_valid_dmc_id(dmc_id)) {
+ drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", dmc_id);
continue;
}
@@ -556,29 +624,24 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
* check for the stepping since we already found a previous FW
* for this id.
*/
- if (dmc->dmc_info[id].present)
+ if (dmc->dmc_info[dmc_id].present)
continue;
if (fw_info_matches_stepping(&fw_info[i], si)) {
- dmc->dmc_info[id].present = true;
- dmc->dmc_info[id].dmc_offset = fw_info[i].offset;
+ dmc->dmc_info[dmc_id].present = true;
+ dmc->dmc_info[dmc_id].dmc_offset = fw_info[i].offset;
}
}
}
static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
const u32 *mmioaddr, u32 mmio_count,
- int header_ver, u8 dmc_id)
+ int header_ver, enum intel_dmc_id dmc_id)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ struct drm_i915_private *i915 = dmc->i915;
u32 start_range, end_range;
int i;
- if (dmc_id >= DMC_FW_MAX) {
- drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id);
- return false;
- }
-
if (header_ver == 1) {
start_range = DMC_MMIO_START_RANGE;
end_range = DMC_MMIO_END_RANGE;
@@ -606,9 +669,9 @@ static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
const struct intel_dmc_header_base *dmc_header,
- size_t rem_size, u8 dmc_id)
+ size_t rem_size, enum intel_dmc_id dmc_id)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ struct drm_i915_private *i915 = dmc->i915;
struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id];
unsigned int header_len_bytes, dmc_header_size, payload_size, i;
const u32 *mmioaddr, *mmiodata;
@@ -719,7 +782,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc,
const struct stepping_info *si,
size_t rem_size)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ struct drm_i915_private *i915 = dmc->i915;
u32 package_size = sizeof(struct intel_package_header);
u32 num_entries, max_entries;
const struct intel_fw_info *fw_info;
@@ -773,7 +836,7 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
struct intel_css_header *css_header,
size_t rem_size)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ struct drm_i915_private *i915 = dmc->i915;
if (rem_size < sizeof(struct intel_css_header)) {
drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
@@ -793,18 +856,17 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
return sizeof(struct intel_css_header);
}
-static void parse_dmc_fw(struct drm_i915_private *dev_priv,
- const struct firmware *fw)
+static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
{
+ struct drm_i915_private *i915 = dmc->i915;
struct intel_css_header *css_header;
struct intel_package_header *package_header;
struct intel_dmc_header_base *dmc_header;
- struct intel_dmc *dmc = &dev_priv->display.dmc;
struct stepping_info display_info = { '*', '*'};
- const struct stepping_info *si = intel_get_stepping_info(dev_priv, &display_info);
+ const struct stepping_info *si = intel_get_stepping_info(i915, &display_info);
+ enum intel_dmc_id dmc_id;
u32 readcount = 0;
u32 r, offset;
- int id;
if (!fw)
return;
@@ -825,34 +887,33 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv,
readcount += r;
- for (id = 0; id < DMC_FW_MAX; id++) {
- if (!dev_priv->display.dmc.dmc_info[id].present)
+ for_each_dmc_id(dmc_id) {
+ if (!dmc->dmc_info[dmc_id].present)
continue;
- offset = readcount + dmc->dmc_info[id].dmc_offset * 4;
+ offset = readcount + dmc->dmc_info[dmc_id].dmc_offset * 4;
if (offset > fw->size) {
- drm_err(&dev_priv->drm, "Reading beyond the fw_size\n");
+ drm_err(&i915->drm, "Reading beyond the fw_size\n");
continue;
}
dmc_header = (struct intel_dmc_header_base *)&fw->data[offset];
- parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, id);
+ parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, dmc_id);
}
}
-static void intel_dmc_runtime_pm_get(struct drm_i915_private *dev_priv)
+static void intel_dmc_runtime_pm_get(struct drm_i915_private *i915)
{
- drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref);
- dev_priv->display.dmc.wakeref =
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref);
+ i915->display.dmc.wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT);
}
-static void intel_dmc_runtime_pm_put(struct drm_i915_private *dev_priv)
+static void intel_dmc_runtime_pm_put(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&dev_priv->display.dmc.wakeref);
+ fetch_and_zero(&i915->display.dmc.wakeref);
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
+ intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
}
static const char *dmc_fallback_path(struct drm_i915_private *i915)
@@ -865,46 +926,40 @@ static const char *dmc_fallback_path(struct drm_i915_private *i915)
static void dmc_load_work_fn(struct work_struct *work)
{
- struct drm_i915_private *dev_priv;
- struct intel_dmc *dmc;
+ struct intel_dmc *dmc = container_of(work, typeof(*dmc), work);
+ struct drm_i915_private *i915 = dmc->i915;
const struct firmware *fw = NULL;
const char *fallback_path;
int err;
- dev_priv = container_of(work, typeof(*dev_priv), display.dmc.work);
- dmc = &dev_priv->display.dmc;
-
- err = request_firmware(&fw, dev_priv->display.dmc.fw_path, dev_priv->drm.dev);
+ err = request_firmware(&fw, dmc->fw_path, i915->drm.dev);
- if (err == -ENOENT && !dev_priv->params.dmc_firmware_path) {
- fallback_path = dmc_fallback_path(dev_priv);
+ if (err == -ENOENT && !i915->params.dmc_firmware_path) {
+ fallback_path = dmc_fallback_path(i915);
if (fallback_path) {
- drm_dbg_kms(&dev_priv->drm,
- "%s not found, falling back to %s\n",
- dmc->fw_path,
- fallback_path);
- err = request_firmware(&fw, fallback_path, dev_priv->drm.dev);
+ drm_dbg_kms(&i915->drm, "%s not found, falling back to %s\n",
+ dmc->fw_path, fallback_path);
+ err = request_firmware(&fw, fallback_path, i915->drm.dev);
if (err == 0)
- dev_priv->display.dmc.fw_path = fallback_path;
+ dmc->fw_path = fallback_path;
}
}
- parse_dmc_fw(dev_priv, fw);
+ parse_dmc_fw(dmc, fw);
- if (intel_dmc_has_payload(dev_priv)) {
- intel_dmc_load_program(dev_priv);
- intel_dmc_runtime_pm_put(dev_priv);
+ if (intel_dmc_has_payload(i915)) {
+ intel_dmc_load_program(i915);
+ intel_dmc_runtime_pm_put(i915);
- drm_info(&dev_priv->drm,
- "Finished loading DMC firmware %s (v%u.%u)\n",
- dev_priv->display.dmc.fw_path, DMC_VERSION_MAJOR(dmc->version),
+ drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n",
+ dmc->fw_path, DMC_VERSION_MAJOR(dmc->version),
DMC_VERSION_MINOR(dmc->version));
} else {
- drm_notice(&dev_priv->drm,
+ drm_notice(&i915->drm,
"Failed to load DMC firmware %s."
" Disabling runtime power management.\n",
dmc->fw_path);
- drm_notice(&dev_priv->drm, "DMC firmware homepage: %s",
+ drm_notice(&i915->drm, "DMC firmware homepage: %s",
INTEL_UC_FIRMWARE_URL);
}
@@ -912,19 +967,17 @@ static void dmc_load_work_fn(struct work_struct *work)
}
/**
- * intel_dmc_ucode_init() - initialize the firmware loading.
- * @dev_priv: i915 drm device.
+ * intel_dmc_init() - initialize the firmware loading.
+ * @i915: i915 drm device.
*
* This function is called when the display driver is loaded, to read the
* firmware from a .bin file and copy it into internal memory.
*/
-void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
+void intel_dmc_init(struct drm_i915_private *i915)
{
- struct intel_dmc *dmc = &dev_priv->display.dmc;
-
- INIT_WORK(&dev_priv->display.dmc.work, dmc_load_work_fn);
+ struct intel_dmc *dmc;
- if (!HAS_DMC(dev_priv))
+ if (!HAS_DMC(i915))
return;
/*
@@ -935,168 +988,195 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
* suspend as runtime suspend *requires* a working DMC for whatever
* reason.
*/
- intel_dmc_runtime_pm_get(dev_priv);
+ intel_dmc_runtime_pm_get(i915);
+
+ dmc = kzalloc(sizeof(*dmc), GFP_KERNEL);
+ if (!dmc)
+ return;
+
+ dmc->i915 = i915;
+
+ INIT_WORK(&dmc->work, dmc_load_work_fn);
- if (IS_DG2(dev_priv)) {
+ if (IS_METEORLAKE(i915)) {
+ dmc->fw_path = MTL_DMC_PATH;
+ dmc->max_fw_size = XELPDP_DMC_MAX_FW_SIZE;
+ } else if (IS_DG2(i915)) {
dmc->fw_path = DG2_DMC_PATH;
dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
- } else if (IS_ALDERLAKE_P(dev_priv)) {
+ } else if (IS_ALDERLAKE_P(i915)) {
dmc->fw_path = ADLP_DMC_PATH;
dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
- } else if (IS_ALDERLAKE_S(dev_priv)) {
+ } else if (IS_ALDERLAKE_S(i915)) {
dmc->fw_path = ADLS_DMC_PATH;
dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_DG1(dev_priv)) {
+ } else if (IS_DG1(i915)) {
dmc->fw_path = DG1_DMC_PATH;
dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_ROCKETLAKE(dev_priv)) {
+ } else if (IS_ROCKETLAKE(i915)) {
dmc->fw_path = RKL_DMC_PATH;
dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_TIGERLAKE(dev_priv)) {
+ } else if (IS_TIGERLAKE(i915)) {
dmc->fw_path = TGL_DMC_PATH;
dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (DISPLAY_VER(dev_priv) == 11) {
+ } else if (DISPLAY_VER(i915) == 11) {
dmc->fw_path = ICL_DMC_PATH;
dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE;
- } else if (IS_GEMINILAKE(dev_priv)) {
+ } else if (IS_GEMINILAKE(i915)) {
dmc->fw_path = GLK_DMC_PATH;
dmc->max_fw_size = GLK_DMC_MAX_FW_SIZE;
- } else if (IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv) ||
- IS_COMETLAKE(dev_priv)) {
+ } else if (IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915)) {
dmc->fw_path = KBL_DMC_PATH;
dmc->max_fw_size = KBL_DMC_MAX_FW_SIZE;
- } else if (IS_SKYLAKE(dev_priv)) {
+ } else if (IS_SKYLAKE(i915)) {
dmc->fw_path = SKL_DMC_PATH;
dmc->max_fw_size = SKL_DMC_MAX_FW_SIZE;
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_BROXTON(i915)) {
dmc->fw_path = BXT_DMC_PATH;
dmc->max_fw_size = BXT_DMC_MAX_FW_SIZE;
}
- if (dev_priv->params.dmc_firmware_path) {
- if (strlen(dev_priv->params.dmc_firmware_path) == 0) {
- dmc->fw_path = NULL;
- drm_info(&dev_priv->drm,
+ if (i915->params.dmc_firmware_path) {
+ if (strlen(i915->params.dmc_firmware_path) == 0) {
+ drm_info(&i915->drm,
"Disabling DMC firmware and runtime PM\n");
- return;
+ goto out;
}
- dmc->fw_path = dev_priv->params.dmc_firmware_path;
+ dmc->fw_path = i915->params.dmc_firmware_path;
}
if (!dmc->fw_path) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"No known DMC firmware for platform, disabling runtime PM\n");
- return;
+ goto out;
}
- drm_dbg_kms(&dev_priv->drm, "Loading %s\n", dmc->fw_path);
- schedule_work(&dev_priv->display.dmc.work);
+ i915->display.dmc.dmc = dmc;
+
+ drm_dbg_kms(&i915->drm, "Loading %s\n", dmc->fw_path);
+ schedule_work(&dmc->work);
+
+ return;
+
+out:
+ kfree(dmc);
}
/**
- * intel_dmc_ucode_suspend() - prepare DMC firmware before system suspend
- * @dev_priv: i915 drm device
+ * intel_dmc_suspend() - prepare DMC firmware before system suspend
+ * @i915: i915 drm device
*
* Prepare the DMC firmware before entering system suspend. This includes
* flushing pending work items and releasing any resources acquired during
* init.
*/
-void intel_dmc_ucode_suspend(struct drm_i915_private *dev_priv)
+void intel_dmc_suspend(struct drm_i915_private *i915)
{
- if (!HAS_DMC(dev_priv))
+ struct intel_dmc *dmc = i915_to_dmc(i915);
+
+ if (!HAS_DMC(i915))
return;
- flush_work(&dev_priv->display.dmc.work);
+ if (dmc)
+ flush_work(&dmc->work);
/* Drop the reference held in case DMC isn't loaded. */
- if (!intel_dmc_has_payload(dev_priv))
- intel_dmc_runtime_pm_put(dev_priv);
+ if (!intel_dmc_has_payload(i915))
+ intel_dmc_runtime_pm_put(i915);
}
/**
- * intel_dmc_ucode_resume() - init DMC firmware during system resume
- * @dev_priv: i915 drm device
+ * intel_dmc_resume() - init DMC firmware during system resume
+ * @i915: i915 drm device
*
* Reinitialize the DMC firmware during system resume, reacquiring any
- * resources released in intel_dmc_ucode_suspend().
+ * resources released in intel_dmc_suspend().
*/
-void intel_dmc_ucode_resume(struct drm_i915_private *dev_priv)
+void intel_dmc_resume(struct drm_i915_private *i915)
{
- if (!HAS_DMC(dev_priv))
+ if (!HAS_DMC(i915))
return;
/*
* Reacquire the reference to keep RPM disabled in case DMC isn't
* loaded.
*/
- if (!intel_dmc_has_payload(dev_priv))
- intel_dmc_runtime_pm_get(dev_priv);
+ if (!intel_dmc_has_payload(i915))
+ intel_dmc_runtime_pm_get(i915);
}
/**
- * intel_dmc_ucode_fini() - unload the DMC firmware.
- * @dev_priv: i915 drm device.
+ * intel_dmc_fini() - unload the DMC firmware.
+ * @i915: i915 drm device.
*
* Firmware unloading includes freeing the internal memory and resetting the
* firmware loading status.
*/
-void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv)
+void intel_dmc_fini(struct drm_i915_private *i915)
{
- int id;
+ struct intel_dmc *dmc = i915_to_dmc(i915);
+ enum intel_dmc_id dmc_id;
- if (!HAS_DMC(dev_priv))
+ if (!HAS_DMC(i915))
return;
- intel_dmc_ucode_suspend(dev_priv);
- drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref);
+ intel_dmc_suspend(i915);
+ drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref);
+
+ if (dmc) {
+ for_each_dmc_id(dmc_id)
+ kfree(dmc->dmc_info[dmc_id].payload);
- for (id = 0; id < DMC_FW_MAX; id++)
- kfree(dev_priv->display.dmc.dmc_info[id].payload);
+ kfree(dmc);
+ i915->display.dmc.dmc = NULL;
+ }
}
void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m,
struct drm_i915_private *i915)
{
- struct intel_dmc *dmc = &i915->display.dmc;
+ struct intel_dmc *dmc = i915_to_dmc(i915);
if (!HAS_DMC(i915))
return;
+ i915_error_printf(m, "DMC initialized: %s\n", str_yes_no(dmc));
i915_error_printf(m, "DMC loaded: %s\n",
str_yes_no(intel_dmc_has_payload(i915)));
- i915_error_printf(m, "DMC fw version: %d.%d\n",
- DMC_VERSION_MAJOR(dmc->version),
- DMC_VERSION_MINOR(dmc->version));
+ if (dmc)
+ i915_error_printf(m, "DMC fw version: %d.%d\n",
+ DMC_VERSION_MAJOR(dmc->version),
+ DMC_VERSION_MINOR(dmc->version));
}
static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = m->private;
+ struct intel_dmc *dmc = i915_to_dmc(i915);
intel_wakeref_t wakeref;
- struct intel_dmc *dmc;
i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG;
if (!HAS_DMC(i915))
return -ENODEV;
- dmc = &i915->display.dmc;
-
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc));
seq_printf(m, "fw loaded: %s\n",
str_yes_no(intel_dmc_has_payload(i915)));
- seq_printf(m, "path: %s\n", dmc->fw_path);
+ seq_printf(m, "path: %s\n", dmc ? dmc->fw_path : "N/A");
seq_printf(m, "Pipe A fw needed: %s\n",
str_yes_no(GRAPHICS_VER(i915) >= 12));
seq_printf(m, "Pipe A fw loaded: %s\n",
- str_yes_no(dmc->dmc_info[DMC_FW_PIPEA].payload));
+ str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEA)));
seq_printf(m, "Pipe B fw needed: %s\n",
str_yes_no(IS_ALDERLAKE_P(i915) ||
DISPLAY_VER(i915) >= 14));
seq_printf(m, "Pipe B fw loaded: %s\n",
- str_yes_no(dmc->dmc_info[DMC_FW_PIPEB].payload));
+ str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEB)));
if (!intel_dmc_has_payload(i915))
goto out;
@@ -1130,9 +1210,10 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
seq_printf(m, "DC5 -> DC6 count: %d\n",
intel_de_read(i915, dc6_reg));
-out:
seq_printf(m, "program base: 0x%08x\n",
intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));
+
+out:
seq_printf(m, "ssp base: 0x%08x\n",
intel_de_read(i915, DMC_SSP_BASE));
seq_printf(m, "htp: 0x%08x\n", intel_de_read(i915, DMC_HTP_SKL));
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h
index fd1725de4289..fd607afff2ef 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc.h
@@ -6,54 +6,20 @@
#ifndef __INTEL_DMC_H__
#define __INTEL_DMC_H__
-#include "i915_reg_defs.h"
-#include "intel_wakeref.h"
-#include <linux/workqueue.h>
+#include <linux/types.h>
struct drm_i915_error_state_buf;
struct drm_i915_private;
-
enum pipe;
-enum {
- DMC_FW_MAIN = 0,
- DMC_FW_PIPEA,
- DMC_FW_PIPEB,
- DMC_FW_PIPEC,
- DMC_FW_PIPED,
- DMC_FW_MAX
-};
-
-struct intel_dmc {
- struct work_struct work;
- const char *fw_path;
- u32 max_fw_size; /* bytes */
- u32 version;
- struct dmc_fw_info {
- u32 mmio_count;
- i915_reg_t mmioaddr[20];
- u32 mmiodata[20];
- u32 dmc_offset;
- u32 start_mmioaddr;
- u32 dmc_fw_size; /*dwords */
- u32 *payload;
- bool present;
- } dmc_info[DMC_FW_MAX];
-
- u32 dc_state;
- u32 target_dc_state;
- u32 allowed_dc_mask;
- intel_wakeref_t wakeref;
-};
-
-void intel_dmc_ucode_init(struct drm_i915_private *i915);
+void intel_dmc_init(struct drm_i915_private *i915);
void intel_dmc_load_program(struct drm_i915_private *i915);
void intel_dmc_disable_program(struct drm_i915_private *i915);
void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe);
void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe);
-void intel_dmc_ucode_fini(struct drm_i915_private *i915);
-void intel_dmc_ucode_suspend(struct drm_i915_private *i915);
-void intel_dmc_ucode_resume(struct drm_i915_private *i915);
+void intel_dmc_fini(struct drm_i915_private *i915);
+void intel_dmc_suspend(struct drm_i915_private *i915);
+void intel_dmc_resume(struct drm_i915_private *i915);
bool intel_dmc_has_payload(struct drm_i915_private *i915);
void intel_dmc_debugfs_register(struct drm_i915_private *i915);
void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m,
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 62cbab7402e9..da1c00ee92fb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -288,7 +288,7 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port)
{
- int vbt_max_lanes = intel_bios_dp_max_lane_count(&dig_port->base);
+ int vbt_max_lanes = intel_bios_dp_max_lane_count(dig_port->base.devdata);
int max_lanes = dig_port->max_lanes;
if (vbt_max_lanes)
@@ -425,7 +425,7 @@ static int vbt_max_link_rate(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
int max_rate;
- max_rate = intel_bios_dp_max_link_rate(encoder);
+ max_rate = intel_bios_dp_max_link_rate(encoder->devdata);
if (intel_dp_is_edp(intel_dp)) {
struct intel_connector *connector = intel_dp->attached_connector;
@@ -687,6 +687,12 @@ u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 p
/* From XE_LPD onwards we support from bpc upto uncompressed bpp-1 BPPs */
if (DISPLAY_VER(i915) >= 13) {
bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);
+
+ /*
+ * According to BSpec, 27 is the max DSC output bpp,
+ * 8 is the min DSC output bpp
+ */
+ bits_per_pixel = clamp_t(u32, bits_per_pixel, 8, 27);
} else {
/* Find the nearest match in the array of known BPPs from VESA */
for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
@@ -716,9 +722,19 @@ u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
* (LinkSymbolClock)* 8 * (TimeSlots / 64)
* for SST -> TimeSlots is 64(i.e all TimeSlots that are available)
* for MST -> TimeSlots has to be calculated, based on mode requirements
+ *
+ * Due to FEC overhead, the available bw is reduced to 97.2261%.
+ * To support the given mode:
+ * Bandwidth required should be <= Available link Bandwidth * FEC Overhead
+ * =>ModeClock * bits_per_pixel <= Available Link Bandwidth * FEC Overhead
+ * =>bits_per_pixel <= Available link Bandwidth * FEC Overhead / ModeClock
+ * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock) * 8 (TimeSlots / 64) /
+ * (ModeClock / FEC Overhead)
+ * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock * TimeSlots) /
+ * (ModeClock / FEC Overhead * 8)
*/
- bits_per_pixel = DIV_ROUND_UP((link_clock * lane_count) * timeslots,
- intel_dp_mode_to_fec_clock(mode_clock) * 8);
+ bits_per_pixel = ((link_clock * lane_count) * timeslots) /
+ (intel_dp_mode_to_fec_clock(mode_clock) * 8);
drm_dbg_kms(&i915->drm, "Max link bpp is %u for %u timeslots "
"total bw %u pixel clock %u\n",
@@ -771,6 +787,13 @@ u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
min_slice_count = DIV_ROUND_UP(mode_clock,
DP_DSC_MAX_ENC_THROUGHPUT_1);
+ /*
+ * Due to some DSC engine BW limitations, we need to enable second
+ * slice and VDSC engine, whenever we approach close enough to max CDCLK
+ */
+ if (mode_clock >= ((i915->display.cdclk.max_cdclk_freq * 85) / 100))
+ min_slice_count = max_t(u8, min_slice_count, 2);
+
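	/*
	 * Worked example for the check above, with an illustrative (not
	 * platform-accurate) max CDCLK of 480000 kHz: the threshold is
	 * 480000 * 85 / 100 = 408000 kHz, so any mode clock at or above
	 * 408 MHz bumps min_slice_count to at least 2, which the dsc_split
	 * handling later in this file can then turn into two VDSC engines.
	 */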
max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
drm_dbg_kms(&i915->drm,
@@ -1415,6 +1438,28 @@ static int intel_dp_sink_dsc_version_minor(struct intel_dp *intel_dp)
DP_DSC_MINOR_SHIFT;
}
+static int intel_dp_get_slice_height(int vactive)
+{
+ int slice_height;
+
+ /*
+ * VDSC 1.2a spec in Section 3.8 Options for Slices implies that 108
+ * lines is an optimal slice height, but any size can be used as long as
+ * vertical active integer multiple and maximum vertical slice count
+ * requirements are met.
+ */
+ for (slice_height = 108; slice_height <= vactive; slice_height += 2)
+ if (vactive % slice_height == 0)
+ return slice_height;
+
+ /*
+ * Highly unlikely we reach here, as most resolutions will have found an
+ * appropriate slice_height in the loop above; fall back to a slice_height
+ * of 2, which should work with all resolutions.
+ */
+ return 2;
+}
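/*
 * Worked examples for the helper above (numbers illustrative): a 2160-line
 * mode returns 108 straight away (2160 % 108 == 0); a 1440-line mode steps
 * through 108, 110, ... and settles on 120 (1440 % 120 == 0); only when no
 * even height from 108 up to vactive divides the mode evenly does the
 * fallback of 2 apply.
 */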
+
static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
@@ -1433,17 +1478,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
- /*
- * Slice Height of 8 works for all currently available panels. So start
- * with that if pic_height is an integral multiple of 8. Eventually add
- * logic to try multiple slice heights.
- */
- if (vdsc_cfg->pic_height % 8 == 0)
- vdsc_cfg->slice_height = 8;
- else if (vdsc_cfg->pic_height % 4 == 0)
- vdsc_cfg->slice_height = 4;
- else
- vdsc_cfg->slice_height = 2;
+ vdsc_cfg->slice_height = intel_dp_get_slice_height(vdsc_cfg->pic_height);
ret = intel_dsc_compute_params(crtc_state);
if (ret)
@@ -1585,16 +1620,8 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
* is greater than the maximum Cdclock and if slice count is even
* then we need to use 2 VDSC instances.
*/
- if (adjusted_mode->crtc_clock > dev_priv->display.cdclk.max_cdclk_freq ||
- pipe_config->bigjoiner_pipes) {
- if (pipe_config->dsc.slice_count > 1) {
- pipe_config->dsc.dsc_split = true;
- } else {
- drm_dbg_kms(&dev_priv->drm,
- "Cannot split stream to use 2 VDSC instances\n");
- return -EINVAL;
- }
- }
+ if (pipe_config->bigjoiner_pipes || pipe_config->dsc.slice_count > 1)
+ pipe_config->dsc.dsc_split = true;
ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
if (ret < 0) {
@@ -1727,7 +1754,7 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
* Our YCbCr output is always limited range.
* crtc_state->limited_color_range only applies to RGB,
* and it must never be set for YCbCr or we risk setting
- * some conflicting bits in PIPECONF which will mess up
+ * some conflicting bits in TRANSCONF which will mess up
* the colors on the monitor.
*/
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
@@ -1991,7 +2018,6 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
}
static bool intel_dp_has_audio(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
@@ -2057,7 +2083,7 @@ intel_dp_audio_compute_config(struct intel_encoder *encoder,
struct drm_connector *connector = conn_state->connector;
pipe_config->sdp_split_enable =
- intel_dp_has_audio(encoder, pipe_config, conn_state) &&
+ intel_dp_has_audio(encoder, conn_state) &&
intel_dp_is_uhbr(pipe_config);
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDP split enable: %s\n",
@@ -2081,7 +2107,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = true;
pipe_config->has_audio =
- intel_dp_has_audio(encoder, pipe_config, conn_state) &&
+ intel_dp_has_audio(encoder, conn_state) &&
intel_audio_compute_config(encoder, pipe_config, conn_state);
fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode);
@@ -2281,10 +2307,15 @@ intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
{
+ struct intel_connector *connector = intel_dp->attached_connector;
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- drm_dbg_kms(&i915->drm, "Performing OUI wait\n");
- wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 30);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n",
+ connector->base.base.id, connector->base.name,
+ connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);
+
+ wait_remaining_ms_from_jiffies(intel_dp->last_oui_write,
+ connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);
}
/* If the device supports it, try to set the power state appropriately */
@@ -4851,7 +4882,7 @@ intel_dp_connector_register(struct drm_connector *connector)
if (!ret)
drm_dp_cec_register_connector(&intel_dp->aux, connector);
- if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
+ if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata))
return ret;
/*
@@ -5129,8 +5160,9 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
return IRQ_HANDLED;
}
-/* check the VBT to see whether the eDP is on another port */
-bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
+static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv,
+ const struct intel_bios_encoder_data *devdata,
+ enum port port)
{
/*
* eDP not supported on g4x. so bail out early just
@@ -5142,13 +5174,24 @@ bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A)
return true;
- return intel_bios_is_port_edp(dev_priv, port);
+ return devdata && intel_bios_encoder_supports_edp(devdata);
+}
+
+bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port)
+{
+ const struct intel_bios_encoder_data *devdata =
+ intel_bios_encoder_data_lookup(i915, port);
+
+ return _intel_dp_is_port_edp(i915, devdata, port);
}
static bool
-has_gamut_metadata_dip(struct drm_i915_private *i915, enum port port)
+has_gamut_metadata_dip(struct intel_encoder *encoder)
{
- if (intel_bios_is_lspcon_present(i915, port))
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ if (intel_bios_encoder_is_lspcon(encoder->devdata))
return false;
if (DISPLAY_VER(i915) >= 11)
@@ -5183,14 +5226,14 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
drm_connector_attach_max_bpc_property(connector, 6, 12);
/* Register HDMI colorspace for case of lspcon */
- if (intel_bios_is_lspcon_present(dev_priv, port)) {
+ if (intel_bios_encoder_is_lspcon(dp_to_dig_port(intel_dp)->base.devdata)) {
drm_connector_attach_content_type_property(connector);
intel_attach_hdmi_colorspace_property(connector);
} else {
intel_attach_dp_colorspace_property(connector);
}
- if (has_gamut_metadata_dip(dev_priv, port))
+ if (has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base))
drm_connector_attach_hdr_output_metadata_property(connector);
if (HAS_VRR(dev_priv))
@@ -5232,11 +5275,6 @@ static void intel_edp_backlight_setup(struct intel_dp *intel_dp,
if (pipe != PIPE_A && pipe != PIPE_B)
pipe = PIPE_A;
-
- drm_dbg_kms(&i915->drm,
- "[CONNECTOR:%d:%s] using pipe %c for initial backlight setup\n",
- connector->base.base.id, connector->base.name,
- pipe_name(pipe));
}
intel_backlight_setup(connector, pipe);
@@ -5412,7 +5450,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
- if (intel_dp_is_port_edp(dev_priv, port)) {
+ if (_intel_dp_is_port_edp(dev_priv, intel_encoder->devdata, port)) {
/*
* Currently we don't support eDP on TypeC ports, although in
* theory it could work on TypeC legacy ports.
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 5a176bfb10a2..eb07dc5d8709 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_trace.h"
+#include "intel_bios.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
@@ -204,8 +205,19 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
for (i = 0; i < ARRAY_SIZE(ch_data); i++)
ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
- if (is_tc_port)
+ if (is_tc_port) {
intel_tc_port_lock(dig_port);
+ /*
+ * Abort transfers on a disconnected port as required by
+ * DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX
+ * timeouts that would otherwise happen.
+ * TODO: abort the transfer on non-TC ports as well.
+ */
+ if (!intel_tc_port_connected_locked(&dig_port->base)) {
+ ret = -ENXIO;
+ goto out_unlock;
+ }
+ }
aux_domain = intel_aux_power_domain(dig_port);
@@ -366,7 +378,7 @@ out:
intel_pps_unlock(intel_dp, pps_wakeref);
intel_display_power_put_async(i915, aux_domain, aux_wakeref);
-
+out_unlock:
if (is_tc_port)
intel_tc_port_unlock(dig_port);
@@ -737,3 +749,37 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
intel_dp->aux.transfer = intel_dp_aux_transfer;
cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}
+
+static enum aux_ch default_aux_ch(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ /* SKL has DDI E but no AUX E */
+ if (DISPLAY_VER(i915) == 9 && encoder->port == PORT_E)
+ return AUX_CH_A;
+
+ return (enum aux_ch)encoder->port;
+}
+
+enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum aux_ch aux_ch;
+
+ aux_ch = intel_bios_dp_aux_ch(encoder->devdata);
+ if (aux_ch != AUX_CH_NONE) {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] using AUX %c (VBT)\n",
+ encoder->base.base.id, encoder->base.name,
+ aux_ch_name(aux_ch));
+ return aux_ch;
+ }
+
+ aux_ch = default_aux_ch(encoder);
+
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] using AUX %c (platform default)\n",
+ encoder->base.base.id, encoder->base.name,
+ aux_ch_name(aux_ch));
+
+ return aux_ch;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h
index 738577537bc7..138e340f94ee 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h
@@ -6,9 +6,13 @@
#ifndef __INTEL_DP_AUX_H__
#define __INTEL_DP_AUX_H__
+enum aux_ch;
struct intel_dp;
+struct intel_encoder;
void intel_dp_aux_fini(struct intel_dp *intel_dp);
void intel_dp_aux_init(struct intel_dp *intel_dp);
+enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder);
+
#endif /* __INTEL_DP_AUX_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 83af95bce98d..95cc5251843e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -105,6 +105,11 @@ enum intel_dp_aux_backlight_modparam {
INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
};
+static bool is_intel_tcon_cap(const u8 tcon_cap[4])
+{
+ return tcon_cap[0] >= 1;
+}
+
/* Intel EDP backlight callbacks */
static bool
intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
@@ -125,14 +130,12 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
if (!(tcon_cap[1] & INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP))
return false;
- if (tcon_cap[0] >= 1) {
- drm_dbg_kms(&i915->drm, "Detected Intel HDR backlight interface version %d\n",
- tcon_cap[0]);
- } else {
- drm_dbg_kms(&i915->drm, "Detected unsupported HDR backlight interface version %d\n",
- tcon_cap[0]);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Detected %s HDR backlight interface version %d\n",
+ connector->base.base.id, connector->base.name,
+ is_intel_tcon_cap(tcon_cap) ? "Intel" : "unsupported", tcon_cap[0]);
+
+ if (!is_intel_tcon_cap(tcon_cap))
return false;
- }
/*
* If we don't have HDR static metadata there is no way to
@@ -147,7 +150,8 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
!(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
BIT(HDMI_STATIC_METADATA_TYPE1))) {
drm_info(&i915->drm,
- "Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n",
+ "[CONNECTOR:%d:%s] Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n",
+ connector->base.base.id, connector->base.name,
INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL);
return false;
}
@@ -168,7 +172,8 @@ intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe
u8 buf[2] = { 0 };
if (drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &tmp) != 1) {
- drm_err(&i915->drm, "Failed to read current backlight mode from DPCD\n");
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read current backlight mode from DPCD\n",
+ connector->base.base.id, connector->base.name);
return 0;
}
@@ -185,7 +190,8 @@ intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe
if (drm_dp_dpcd_read(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf,
sizeof(buf)) != sizeof(buf)) {
- drm_err(&i915->drm, "Failed to read brightness from DPCD\n");
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read brightness from DPCD\n",
+ connector->base.base.id, connector->base.name);
return 0;
}
@@ -205,7 +211,8 @@ intel_dp_aux_hdr_set_aux_backlight(const struct drm_connector_state *conn_state,
if (drm_dp_dpcd_write(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf,
sizeof(buf)) != sizeof(buf))
- drm_err(dev, "Failed to write brightness level to DPCD\n");
+ drm_err(dev, "[CONNECTOR:%d:%s] Failed to write brightness level to DPCD\n",
+ connector->base.base.id, connector->base.name);
}
static void
@@ -238,7 +245,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl);
if (ret != 1) {
- drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret);
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read current backlight control mode: %d\n",
+ connector->base.base.id, connector->base.name, ret);
return;
}
@@ -254,9 +262,10 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
ctrl &= ~INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE;
}
- if (ctrl != old_ctrl)
- if (drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) != 1)
- drm_err(&i915->drm, "Failed to configure DPCD brightness controls\n");
+ if (ctrl != old_ctrl &&
+ drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) != 1)
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to configure DPCD brightness controls\n",
+ connector->base.base.id, connector->base.name);
}
static void
@@ -273,6 +282,11 @@ intel_dp_aux_hdr_disable_backlight(const struct drm_connector_state *conn_state,
panel->backlight.pwm_funcs->disable(conn_state, intel_backlight_invert_pwm_level(connector, 0));
}
+static const char *dpcd_vs_pwm_str(bool aux)
+{
+ return aux ? "DPCD" : "PWM";
+}
+
static int
intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
@@ -282,15 +296,16 @@ intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pi
&connector->base.display_info.luminance_range;
int ret;
- if (panel->backlight.edp.intel.sdr_uses_aux) {
- drm_dbg_kms(&i915->drm, "SDR backlight is controlled through DPCD\n");
- } else {
- drm_dbg_kms(&i915->drm, "SDR backlight is controlled through PWM\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDR backlight is controlled through %s\n",
+ connector->base.base.id, connector->base.name,
+ dpcd_vs_pwm_str(panel->backlight.edp.intel.sdr_uses_aux));
+ if (!panel->backlight.edp.intel.sdr_uses_aux) {
ret = panel->backlight.pwm_funcs->setup(connector, pipe);
if (ret < 0) {
drm_err(&i915->drm,
- "Failed to setup SDR backlight controls through PWM: %d\n", ret);
+ "[CONNECTOR:%d:%s] Failed to setup SDR backlight controls through PWM: %d\n",
+ connector->base.base.id, connector->base.name, ret);
return ret;
}
}
@@ -303,8 +318,10 @@ intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pi
panel->backlight.min = 0;
}
- drm_dbg_kms(&i915->drm, "Using backlight range %d..%d\n", panel->backlight.min,
- panel->backlight.max);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using AUX HDR interface for backlight control (range %d..%d)\n",
+ connector->base.base.id, connector->base.name,
+ panel->backlight.min, panel->backlight.max);
+
panel->backlight.level = intel_dp_aux_hdr_get_backlight(connector, pipe);
panel->backlight.enabled = panel->backlight.level != 0;
@@ -386,12 +403,19 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
if (ret < 0)
return ret;
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n",
+ connector->base.base.id, connector->base.name,
+ dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable));
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n",
+ connector->base.base.id, connector->base.name,
+ dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set));
+
if (!panel->backlight.edp.vesa.info.aux_set || !panel->backlight.edp.vesa.info.aux_enable) {
ret = panel->backlight.pwm_funcs->setup(connector, pipe);
if (ret < 0) {
drm_err(&i915->drm,
- "Failed to setup PWM backlight controls for eDP backlight: %d\n",
- ret);
+ "[CONNECTOR:%d:%s] Failed to setup PWM backlight controls for eDP backlight: %d\n",
+ connector->base.base.id, connector->base.name, ret);
return ret;
}
}
@@ -418,6 +442,9 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
}
}
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using AUX VESA interface for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
@@ -428,7 +455,8 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
- drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX Backlight Control Supported!\n",
+ connector->base.base.id, connector->base.name);
return true;
}
return false;
@@ -504,13 +532,15 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
* interfaces is to probe for Intel's first, and VESA's second.
*/
if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector)) {
- drm_dbg_kms(dev, "Using Intel proprietary eDP backlight controls\n");
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Using Intel proprietary eDP backlight controls\n",
+ connector->base.base.id, connector->base.name);
panel->backlight.funcs = &intel_dp_hdr_bl_funcs;
return 0;
}
if (try_vesa_interface && intel_dp_aux_supports_vesa_backlight(connector)) {
- drm_dbg_kms(dev, "Using VESA eDP backlight controls\n");
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Using VESA eDP backlight controls\n",
+ connector->base.base.id, connector->base.name);
panel->backlight.funcs = &intel_dp_vesa_bl_funcs;
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 3d3efcf02011..d638054c74ac 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -1379,10 +1379,6 @@ intel_dp_128b132b_lane_cds(struct intel_dp *intel_dp,
}
}
- /* FIXME: Should DP_TRAINING_PATTERN_DISABLE be written first? */
- if (intel_dp->set_idle_link_train)
- intel_dp->set_idle_link_train(intel_dp, crtc_state);
-
return true;
}
@@ -1433,7 +1429,11 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
void intel_dp_start_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool passed;
+
/*
* TODO: Reiniting LTTPRs here won't be needed once proper connector
* HW state readout is added.
@@ -1451,6 +1451,46 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp,
else
passed = intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count);
+ /*
+ * Ignore the link failure in CI
+ *
+	 * In fixed environments like CI, the displays sometimes generate
+	 * unexpected long HPDs. If the ignore_long_hpd flag is set, such long
+	 * HPDs are ignored. Probably as a consequence of these ignored long
+	 * HPDs, subsequent link training attempts fail, resulting in CI
+	 * execution failures.
+	 *
+	 * Test cases which rely on link training or HPD processing can unset
+	 * the ignore_long_hpd flag from the testcase.
+ */
+ if (!passed && i915->display.hotplug.ignore_long_hpd) {
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s] Ignore the link failure\n",
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name);
+ return;
+ }
+
if (!passed)
intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
}
+
+void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ /*
+	 * VIDEO_DIP_CTL register bit 31 should be set to '0' so that SDP CRC
+	 * is not disabled. This is applicable for Display version 13. The
+	 * default value of bit 31 is already '0', hence the write is skipped.
+ * TODO: Corrective actions on SDP corruption yet to be defined
+ */
+ if (intel_dp_is_uhbr(crtc_state))
+ /* DP v2.0 SCR on SDP CRC16 for 128b/132b Link Layer */
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_SDP_ERROR_DETECTION_CONFIGURATION,
+ DP_SDP_CRC16_128B132B_EN);
+
+ drm_dbg_kms(&i915->drm, "DP2.0 SDP CRC16 for 128b/132b enabled\n");
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 7fa1c0833096..2c8f2775891b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -39,4 +39,6 @@ static inline u8 intel_dp_training_pattern_symbol(u8 pattern)
return pattern & ~DP_LINK_SCRAMBLING_DISABLE;
}
+void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_DP_LINK_TRAINING_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 054a009e800d..a860cbc5dbea 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -265,6 +265,19 @@ static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
return 0;
}
+static bool intel_dp_mst_has_audio(const struct drm_connector_state *conn_state)
+{
+ const struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+
+ if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
+ return connector->port->has_audio;
+ else
+ return intel_conn_state->force_audio == HDMI_AUDIO_ON;
+}
+
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -272,10 +285,6 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
- struct intel_connector *connector =
- to_intel_connector(conn_state->connector);
- struct intel_digital_connector_state *intel_conn_state =
- to_intel_digital_connector_state(conn_state);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct link_config_limits limits;
@@ -287,11 +296,9 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_pch_encoder = false;
- if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
- pipe_config->has_audio = connector->port->has_audio;
- else
- pipe_config->has_audio =
- intel_conn_state->force_audio == HDMI_AUDIO_ON;
+ pipe_config->has_audio =
+ intel_dp_mst_has_audio(conn_state) &&
+ intel_audio_compute_config(encoder, pipe_config, conn_state);
/*
* for MST we always configure max link bw - the spec doesn't
@@ -604,7 +611,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
* no clock to the transcoder"
*/
if (DISPLAY_VER(dev_priv) < 12 || !last_mst_stream)
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_mst->connector = NULL;
@@ -684,7 +691,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
* here for the following ones.
*/
if (DISPLAY_VER(dev_priv) < 12 || !first_mst_stream)
- intel_ddi_enable_pipe_clock(encoder, pipe_config);
+ intel_ddi_enable_transcoder_clock(encoder, pipe_config);
intel_ddi_set_dp_msa(pipe_config, conn_state);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 565c06de2432..62b93d097e44 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -389,9 +389,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
"force reprogramming it\n", phy);
}
- val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
- val |= phy_info->pwron_mask;
- intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val);
+ intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, phy_info->pwron_mask);
/*
* The PHY registers start out inaccessible and respond to reads with
@@ -410,27 +408,19 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
phy);
/* Program PLL Rcomp code offset */
- val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW9(phy));
- val &= ~IREF0RC_OFFSET_MASK;
- val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
- intel_de_write(dev_priv, BXT_PORT_CL1CM_DW9(phy), val);
+ intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW9(phy), IREF0RC_OFFSET_MASK,
+ 0xE4 << IREF0RC_OFFSET_SHIFT);
- val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW10(phy));
- val &= ~IREF1RC_OFFSET_MASK;
- val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
- intel_de_write(dev_priv, BXT_PORT_CL1CM_DW10(phy), val);
+ intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW10(phy), IREF1RC_OFFSET_MASK,
+ 0xE4 << IREF1RC_OFFSET_SHIFT);
/* Program power gating */
- val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW28(phy));
- val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
- SUS_CLK_CONFIG;
- intel_de_write(dev_priv, BXT_PORT_CL1CM_DW28(phy), val);
-
- if (phy_info->dual_channel) {
- val = intel_de_read(dev_priv, BXT_PORT_CL2CM_DW6(phy));
- val |= DW6_OLDO_DYN_PWR_DOWN_EN;
- intel_de_write(dev_priv, BXT_PORT_CL2CM_DW6(phy), val);
- }
+ intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW28(phy), 0,
+ OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG);
+
+ if (phy_info->dual_channel)
+ intel_de_rmw(dev_priv, BXT_PORT_CL2CM_DW6(phy), 0,
+ DW6_OLDO_DYN_PWR_DOWN_EN);
if (phy_info->rcomp_phy != -1) {
u32 grc_code;
@@ -449,34 +439,25 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
val << GRC_CODE_SLOW_SHIFT |
val;
intel_de_write(dev_priv, BXT_PORT_REF_DW6(phy), grc_code);
-
- val = intel_de_read(dev_priv, BXT_PORT_REF_DW8(phy));
- val |= GRC_DIS | GRC_RDY_OVRD;
- intel_de_write(dev_priv, BXT_PORT_REF_DW8(phy), val);
+ intel_de_rmw(dev_priv, BXT_PORT_REF_DW8(phy),
+ 0, GRC_DIS | GRC_RDY_OVRD);
}
if (phy_info->reset_delay)
udelay(phy_info->reset_delay);
- val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy));
- val |= COMMON_RESET_DIS;
- intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val);
+ intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), 0, COMMON_RESET_DIS);
}
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
const struct bxt_ddi_phy_info *phy_info;
- u32 val;
phy_info = bxt_get_phy_info(dev_priv, phy);
- val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy));
- val &= ~COMMON_RESET_DIS;
- intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val);
+ intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), COMMON_RESET_DIS, 0);
- val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
- val &= ~phy_info->pwron_mask;
- intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val);
+ intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, phy_info->pwron_mask, 0);
}
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
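
Most of the conversions in this and the following files replace an open-coded read/modify/write with intel_de_rmw(). A rough standalone model of what that helper is assumed to do (illustrative only; the real helper operates on display registers, and the DVO and CMTG hunks below rely on it returning the value that was read):

	/* Model: clear the bits in @clear, set the bits in @set, hand back
	 * the old value. */
	static unsigned int rmw_model(unsigned int *reg, unsigned int clear,
				      unsigned int set)
	{
		unsigned int old = *reg;

		*reg = (old & ~clear) | set;
		return old;
	}
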
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 380368eff31a..22fc908b7e5d 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -608,10 +608,8 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- u32 val;
- val = intel_de_read(dev_priv, WRPLL_CTL(id));
- intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
+ intel_de_rmw(dev_priv, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
intel_de_posting_read(dev_priv, WRPLL_CTL(id));
/*
@@ -626,10 +624,8 @@ static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
enum intel_dpll_id id = pll->info->id;
- u32 val;
- val = intel_de_read(dev_priv, SPLL_CTL);
- intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+ intel_de_rmw(dev_priv, SPLL_CTL, SPLL_PLL_ENABLE, 0);
intel_de_posting_read(dev_priv, SPLL_CTL);
/*
@@ -1238,16 +1234,10 @@ static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- u32 val;
- val = intel_de_read(dev_priv, DPLL_CTRL1);
-
- val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
- DPLL_CTRL1_SSC(id) |
- DPLL_CTRL1_LINK_RATE_MASK(id));
- val |= pll->state.hw_state.ctrl1 << (id * 6);
-
- intel_de_write(dev_priv, DPLL_CTRL1, val);
+ intel_de_rmw(dev_priv, DPLL_CTRL1,
+ DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
+ pll->state.hw_state.ctrl1 << (id * 6));
intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
@@ -1265,8 +1255,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
intel_de_posting_read(dev_priv, regs[id].cfgcr2);
/* the enable bit is always bit 31 */
- intel_de_write(dev_priv, regs[id].ctl,
- intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);
+ intel_de_rmw(dev_priv, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
@@ -1285,8 +1274,7 @@ static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
const enum intel_dpll_id id = pll->info->id;
/* the enable bit is always bit 31 */
- intel_de_write(dev_priv, regs[id].ctl,
- intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
+ intel_de_rmw(dev_priv, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
intel_de_posting_read(dev_priv, regs[id].ctl);
}
@@ -1902,14 +1890,11 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
/* Non-SSC reference */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- temp |= PORT_PLL_REF_SEL;
- intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
if (IS_GEMINILAKE(dev_priv)) {
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- temp |= PORT_PLL_POWER_ENABLE;
- intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
+ 0, PORT_PLL_POWER_ENABLE);
if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
PORT_PLL_POWER_STATE), 200))
@@ -1918,39 +1903,28 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
}
/* Disable 10 bit clock */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
- temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
- intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch),
+ PORT_PLL_10BIT_CLK_ENABLE, 0);
/* Write P1 & P2 */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
- temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
- temp |= pll->state.hw_state.ebb0;
- intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch),
+ PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);
/* Write M2 integer */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
- temp &= ~PORT_PLL_M2_INT_MASK;
- temp |= pll->state.hw_state.pll0;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 0),
+ PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);
/* Write N */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
- temp &= ~PORT_PLL_N_MASK;
- temp |= pll->state.hw_state.pll1;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 1),
+ PORT_PLL_N_MASK, pll->state.hw_state.pll1);
/* Write M2 fraction */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
- temp &= ~PORT_PLL_M2_FRAC_MASK;
- temp |= pll->state.hw_state.pll2;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 2),
+ PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);
/* Write M2 fraction enable */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
- temp &= ~PORT_PLL_M2_FRAC_ENABLE;
- temp |= pll->state.hw_state.pll3;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 3),
+ PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);
/* Write coeff */
temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
@@ -1961,15 +1935,11 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);
/* Write calibration val */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
- temp &= ~PORT_PLL_TARGET_CNT_MASK;
- temp |= pll->state.hw_state.pll8;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 8),
+ PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
- temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
- temp |= pll->state.hw_state.pll9;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 9),
+ PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);
temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
@@ -1986,9 +1956,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable PLL */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- temp |= PORT_PLL_ENABLE;
- intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
@@ -2016,17 +1984,13 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
- u32 temp;
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- temp &= ~PORT_PLL_ENABLE;
- intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
if (IS_GEMINILAKE(dev_priv)) {
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- temp &= ~PORT_PLL_POWER_ENABLE;
- intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
+ PORT_PLL_POWER_ENABLE, 0);
if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
PORT_PLL_POWER_STATE), 200))
@@ -3641,8 +3605,8 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
!i915_mmio_reg_valid(div0_reg));
if (dev_priv->display.vbt.override_afc_startup &&
i915_mmio_reg_valid(div0_reg))
- intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK,
- hw_state->div0);
+ intel_de_rmw(dev_priv, div0_reg,
+ TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
intel_de_posting_read(dev_priv, cfgcr1_reg);
}
@@ -3651,7 +3615,6 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
{
struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
- u32 val;
/*
* Some of the following registers have reserved fields, so program
@@ -3659,23 +3622,19 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
* during the calc/readout phase if the mask depends on some other HW
* state like refclk, see icl_calc_mg_pll_state().
*/
- val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
- val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
- val |= hw_state->mg_refclkin_ctl;
- intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);
+ intel_de_rmw(dev_priv, MG_REFCLKIN_CTL(tc_port),
+ MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
- val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
- val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
- val |= hw_state->mg_clktop2_coreclkctl1;
- intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);
+ intel_de_rmw(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port),
+ MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
+ hw_state->mg_clktop2_coreclkctl1);
- val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
- val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
- MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
- MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
- MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
- val |= hw_state->mg_clktop2_hsclkctl;
- intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);
+ intel_de_rmw(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port),
+ MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
+ hw_state->mg_clktop2_hsclkctl);
intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
@@ -3684,15 +3643,12 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
hw_state->mg_pll_frac_lock);
intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
- val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
- val &= ~hw_state->mg_pll_bias_mask;
- val |= hw_state->mg_pll_bias;
- intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);
+ intel_de_rmw(dev_priv, MG_PLL_BIAS(tc_port),
+ hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
- val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
- val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
- val |= hw_state->mg_pll_tdc_coldst_bias;
- intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);
+ intel_de_rmw(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port),
+ hw_state->mg_pll_tdc_coldst_bias_mask,
+ hw_state->mg_pll_tdc_coldst_bias);
intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
@@ -3766,11 +3722,7 @@ static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
i915_reg_t enable_reg)
{
- u32 val;
-
- val = intel_de_read(dev_priv, enable_reg);
- val |= PLL_POWER_ENABLE;
- intel_de_write(dev_priv, enable_reg, val);
+ intel_de_rmw(dev_priv, enable_reg, 0, PLL_POWER_ENABLE);
/*
* The spec says we need to "wait" but it also says it should be
@@ -3785,11 +3737,7 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
i915_reg_t enable_reg)
{
- u32 val;
-
- val = intel_de_read(dev_priv, enable_reg);
- val |= PLL_ENABLE;
- intel_de_write(dev_priv, enable_reg, val);
+ intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE);
/* Timeout is actually 600us. */
if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
@@ -3815,8 +3763,7 @@ static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct inte
* since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
*/
val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
- val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
- intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING);
+ val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
@@ -3900,8 +3847,6 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
i915_reg_t enable_reg)
{
- u32 val;
-
/* The first steps are done by intel_ddi_post_disable(). */
/*
@@ -3910,9 +3855,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
* nothing here.
*/
- val = intel_de_read(dev_priv, enable_reg);
- val &= ~PLL_ENABLE;
- intel_de_write(dev_priv, enable_reg, val);
+ intel_de_rmw(dev_priv, enable_reg, PLL_ENABLE, 0);
/* Timeout is actually 1us. */
if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
@@ -3920,9 +3863,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
/* DVFS post sequence would be here. See the comment above. */
- val = intel_de_read(dev_priv, enable_reg);
- val &= ~PLL_POWER_ENABLE;
- intel_de_write(dev_priv, enable_reg, val);
+ intel_de_rmw(dev_priv, enable_reg, PLL_POWER_ENABLE, 0);
/*
* The spec says we need to "wait" but it also says it should be
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 29c6421cd666..760e63cdc0c8 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -68,21 +68,15 @@ intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc->drrs.cpu_transcoder;
- u32 val, bit;
+ u32 bit;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- bit = PIPECONF_REFRESH_RATE_ALT_VLV;
+ bit = TRANSCONF_REFRESH_RATE_ALT_VLV;
else
- bit = PIPECONF_REFRESH_RATE_ALT_ILK;
+ bit = TRANSCONF_REFRESH_RATE_ALT_ILK;
- val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
-
- if (refresh_rate == DRRS_REFRESH_RATE_LOW)
- val |= bit;
- else
- val &= ~bit;
-
- intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
+ intel_de_rmw(dev_priv, TRANSCONF(cpu_transcoder),
+ bit, refresh_rate == DRRS_REFRESH_RATE_LOW ? bit : 0);
}
static void
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 96bc117fd6a0..19e422da57dc 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -88,7 +88,8 @@ static bool assert_dsb_has_room(struct intel_dsb *dsb)
/* each instruction is 2 dwords */
return !drm_WARN(&i915->drm, dsb->free_pos > dsb->size - 2,
- "DSB buffer overflow\n");
+ "[CRTC:%d:%s] DSB %d buffer overflow\n",
+ crtc->base.base.id, crtc->base.name, dsb->id);
}
static bool is_dsb_busy(struct drm_i915_private *i915, enum pipe pipe,
@@ -198,7 +199,7 @@ void intel_dsb_reg_write(struct intel_dsb *dsb,
}
}
-static u32 intel_dsb_align_tail(struct intel_dsb *dsb)
+static void intel_dsb_align_tail(struct intel_dsb *dsb)
{
u32 aligned_tail, tail;
@@ -210,49 +211,58 @@ static u32 intel_dsb_align_tail(struct intel_dsb *dsb)
aligned_tail - tail);
dsb->free_pos = aligned_tail / 4;
+}
- return aligned_tail;
+void intel_dsb_finish(struct intel_dsb *dsb)
+{
+ intel_dsb_align_tail(dsb);
}
/**
* intel_dsb_commit() - Trigger workload execution of DSB.
* @dsb: DSB context
+ * @wait_for_vblank: wait for vblank before executing
*
* This function is used to do actual write to hardware using DSB.
*/
-void intel_dsb_commit(struct intel_dsb *dsb)
+void intel_dsb_commit(struct intel_dsb *dsb, bool wait_for_vblank)
{
struct intel_crtc *crtc = dsb->crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 tail;
- tail = intel_dsb_align_tail(dsb);
- if (tail == 0)
+ tail = dsb->free_pos * 4;
+ if (drm_WARN_ON(&dev_priv->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
return;
if (is_dsb_busy(dev_priv, pipe, dsb->id)) {
- drm_err(&dev_priv->drm, "DSB engine is busy.\n");
- goto reset;
+ drm_err(&dev_priv->drm, "[CRTC:%d:%s] DSB %d is busy\n",
+ crtc->base.base.id, crtc->base.name, dsb->id);
+ return;
}
intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id),
+ (wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0) |
DSB_ENABLE);
intel_de_write(dev_priv, DSB_HEAD(pipe, dsb->id),
i915_ggtt_offset(dsb->vma));
intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id),
i915_ggtt_offset(dsb->vma) + tail);
+}
- drm_dbg_kms(&dev_priv->drm,
- "DSB execution started - head 0x%x, tail 0x%x\n",
- i915_ggtt_offset(dsb->vma),
- i915_ggtt_offset(dsb->vma) + tail);
+void intel_dsb_wait(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = dsb->crtc;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1))
drm_err(&dev_priv->drm,
- "Timed out waiting for DSB workload completion.\n");
+ "[CRTC:%d:%s] DSB %d timed out waiting for idle\n",
+ crtc->base.base.id, crtc->base.name, dsb->id);
-reset:
+ /* Attempt to reset it */
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), 0);
@@ -325,7 +335,8 @@ out_put_rpm:
kfree(dsb);
out:
drm_info_once(&i915->drm,
- "DSB queue setup failed, will fallback to MMIO for display HW programming\n");
+ "[CRTC:%d:%s] DSB %d queue setup failed, will fallback to MMIO for display HW programming\n",
+ crtc->base.base.id, crtc->base.name, DSB1);
return NULL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index 05c221b6d0a4..b8148b47022d 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -15,9 +15,12 @@ struct intel_dsb;
struct intel_dsb *intel_dsb_prepare(struct intel_crtc *crtc,
unsigned int max_cmds);
+void intel_dsb_finish(struct intel_dsb *dsb);
void intel_dsb_cleanup(struct intel_dsb *dsb);
void intel_dsb_reg_write(struct intel_dsb *dsb,
i915_reg_t reg, u32 val);
-void intel_dsb_commit(struct intel_dsb *dsb);
+void intel_dsb_commit(struct intel_dsb *dsb,
+ bool wait_for_vblank);
+void intel_dsb_wait(struct intel_dsb *dsb);
#endif
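
Given the finish/commit/wait split above, a caller is presumably expected to drive a DSB roughly as follows (a sketch based only on the prototypes in this header; the max_cmds value is arbitrary and the real callers are outside this diff):

	static void dsb_flow_sketch(struct intel_crtc *crtc)
	{
		struct intel_dsb *dsb = intel_dsb_prepare(crtc, 1024);

		if (!dsb)
			return;	/* fall back to MMIO programming */

		/* queue writes: intel_dsb_reg_write(dsb, reg, val); ... */

		intel_dsb_finish(dsb);		/* pad/align the tail */
		intel_dsb_commit(dsb, false);	/* false: don't wait for vblank */
		intel_dsb_wait(dsb);		/* wait for the engine to go idle */
		intel_dsb_cleanup(dsb);
	}
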
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index 20e466d843ce..049443245310 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -162,6 +162,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
static int dcs_setup_backlight(struct intel_connector *connector,
enum pipe unused)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
if (panel->vbt.backlight.brightness_precision_bits > 8)
@@ -171,6 +172,10 @@ static int dcs_setup_backlight(struct intel_connector *connector,
panel->backlight.level = panel->backlight.max;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using DCS for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 0be8105cb18a..eb2dcd866cc8 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -444,11 +444,8 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
* the clock enabled before we attempt to initialize
* the device.
*/
- for_each_pipe(dev_priv, pipe) {
- dpll[pipe] = intel_de_read(dev_priv, DPLL(pipe));
- intel_de_write(dev_priv, DPLL(pipe),
- dpll[pipe] | DPLL_DVO_2X_MODE);
- }
+ for_each_pipe(dev_priv, pipe)
+ dpll[pipe] = intel_de_rmw(dev_priv, DPLL(pipe), 0, DPLL_DVO_2X_MODE);
ret = dvo->dev_ops->init(&intel_dvo->dev, i2c);
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 93d0e46e5481..799bdc81a6a9 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -2007,6 +2007,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
vm = intel_dpt_create(intel_fb);
if (IS_ERR(vm)) {
+ drm_dbg_kms(&dev_priv->drm, "failed to create DPT\n");
ret = PTR_ERR(vm);
goto err;
}
@@ -2017,11 +2018,14 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
if (ret) {
drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
- goto err;
+ goto err_free_dpt;
}
return 0;
+err_free_dpt:
+ if (intel_fb_uses_dpt(fb))
+ intel_dpt_destroy(intel_fb->dpt_vm);
err:
intel_frontbuffer_put(intel_fb->frontbuffer);
return ret;
@@ -2046,6 +2050,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) {
/* object is "remote", not in local memory */
i915_gem_object_put(obj);
+ drm_dbg_kms(&i915->drm, "framebuffer must reside in local memory\n");
return ERR_PTR(-EREMOTE);
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index f76b06293eb9..673bcdfb7ff6 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -210,6 +210,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
bool prealloc = false;
void __iomem *vaddr;
struct drm_i915_gem_object *obj;
+ struct i915_gem_ww_ctx ww;
int ret;
mutex_lock(&ifbdev->hpd_lock);
@@ -283,13 +284,24 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->fix.smem_len = vma->size;
}
- vaddr = i915_vma_pin_iomap(vma);
- if (IS_ERR(vaddr)) {
- drm_err(&dev_priv->drm,
- "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
- ret = PTR_ERR(vaddr);
- goto out_unpin;
+ for_i915_gem_ww(&ww, ret, false) {
+ ret = i915_gem_object_lock(vma->obj, &ww);
+
+ if (ret)
+ continue;
+
+ vaddr = i915_vma_pin_iomap(vma);
+ if (IS_ERR(vaddr)) {
+ drm_err(&dev_priv->drm,
+ "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
+ ret = PTR_ERR(vaddr);
+ continue;
+ }
}
+
+ if (ret)
+ goto out_unpin;
+
info->screen_base = vaddr;
info->screen_size = vma->size;
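
The for_i915_gem_ww() loop introduced above hides a lock/backoff/retry cycle; an approximate open-coded equivalent is sketched below (an assumption about the macro's behaviour based on the i915_gem_ww_ctx helpers, not something defined in this diff):

	i915_gem_ww_ctx_init(&ww, false);
	retry:
	ret = i915_gem_object_lock(vma->obj, &ww);
	if (!ret) {
		vaddr = i915_vma_pin_iomap(vma);
		if (IS_ERR(vaddr))
			ret = PTR_ERR(vaddr);
	}
	if (ret == -EDEADLK && !i915_gem_ww_ctx_backoff(&ww))
		goto retry;
	i915_gem_ww_ctx_fini(&ww);
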
@@ -561,9 +573,9 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
}
-void intel_fbdev_initial_config_async(struct drm_device *dev)
+void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv)
{
- struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
+ struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
if (!ifbdev)
return;
@@ -706,9 +718,9 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
drm_fb_helper_hotplug_event(&ifbdev->helper);
}
-void intel_fbdev_restore_mode(struct drm_device *dev)
+void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv)
{
- struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
+ struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
if (!ifbdev)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
index 0e95e9472fa3..04fd523a5023 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.h
@@ -15,12 +15,12 @@ struct intel_framebuffer;
#ifdef CONFIG_DRM_FBDEV_EMULATION
int intel_fbdev_init(struct drm_device *dev);
-void intel_fbdev_initial_config_async(struct drm_device *dev);
+void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv);
void intel_fbdev_unregister(struct drm_i915_private *dev_priv);
void intel_fbdev_fini(struct drm_i915_private *dev_priv);
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
void intel_fbdev_output_poll_changed(struct drm_device *dev);
-void intel_fbdev_restore_mode(struct drm_device *dev);
+void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv);
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev);
#else
static inline int intel_fbdev_init(struct drm_device *dev)
@@ -28,7 +28,7 @@ static inline int intel_fbdev_init(struct drm_device *dev)
return 0;
}
-static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
+static inline void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv)
{
}
@@ -48,7 +48,7 @@ static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
-static inline void intel_fbdev_restore_mode(struct drm_device *dev)
+static inline void intel_fbdev_restore_mode(struct drm_i915_private *i915)
{
}
static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 063f1da4f229..c08c26a321b3 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -366,8 +366,7 @@ void intel_fdi_normal_train(struct intel_crtc *crtc)
/* IVB wants error correction enabled */
if (IS_IVYBRIDGE(dev_priv))
- intel_de_write(dev_priv, reg,
- intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
+ intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}
/* The FDI link training functions for ILK/Ibexpeak. */
@@ -439,19 +438,11 @@ static void ilk_fdi_link_train(struct intel_crtc *crtc,
drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
/* Train 2 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
+ intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
+ FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
udelay(150);
reg = FDI_RX_IIR(pipe);
@@ -538,13 +529,9 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
udelay(150);
for (i = 0; i < 4; i++) {
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[i];
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
+ intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
udelay(500);
for (retry = 0; retry < 5; retry++) {
@@ -593,13 +580,9 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
udelay(150);
for (i = 0; i < 4; i++) {
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[i];
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
+ intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
udelay(500);
for (retry = 0; retry < 5; retry++) {
@@ -719,19 +702,13 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
}
/* Train 2 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE_IVB;
- temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ FDI_LINK_TRAIN_NONE_IVB,
+ FDI_LINK_TRAIN_PATTERN_2_IVB);
+ intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
+ FDI_LINK_TRAIN_PATTERN_MASK_CPT,
+ FDI_LINK_TRAIN_PATTERN_2_CPT);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
udelay(2); /* should be 1.5us */
for (i = 0; i < 4; i++) {
@@ -837,9 +814,8 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
udelay(30);
/* Unset FDI_RX_MISC pwrdn lanes */
- temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
- temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
+ intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
+ FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
/* Wait for FDI auto training time */
@@ -865,25 +841,19 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
- temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
- temp &= ~DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp);
+ intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
- temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E));
- temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
- temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
- intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp);
+ intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));
intel_wait_ddi_buf_idle(dev_priv, PORT_E);
/* Reset FDI_RX_MISC pwrdn lanes */
- temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
- temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
- intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
+ intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
+ FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
+ FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
}
@@ -898,7 +868,6 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
void hsw_fdi_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val;
/*
* Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
@@ -906,30 +875,15 @@ void hsw_fdi_disable(struct intel_encoder *encoder)
* step 13 is the correct place for it. Step 18 is where it was
* originally before the BUN.
*/
- val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
- val &= ~FDI_RX_ENABLE;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
-
- val = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
- val &= ~DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), val);
-
+ intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
+ intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
intel_wait_ddi_buf_idle(dev_priv, PORT_E);
-
intel_ddi_disable_clock(encoder);
-
- val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
- val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
- intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val);
-
- val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
- val &= ~FDI_PCDCLK;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
-
- val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
- val &= ~FDI_RX_PLL_ENABLE;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
+ intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
+ FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
+ FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
+ intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
+ intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
}
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
@@ -945,16 +899,14 @@ void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
temp = intel_de_read(dev_priv, reg);
temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
intel_de_posting_read(dev_priv, reg);
udelay(200);
/* Switch from Rawclk to PCDclk */
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);
-
+ intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK);
intel_de_posting_read(dev_priv, reg);
udelay(200);
@@ -974,28 +926,18 @@ void ilk_fdi_pll_disable(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp;
/* Switch from PCDclk to Rawclk */
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);
+ intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);
/* Disable CPU FDI TX PLL */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
+ intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
udelay(100);
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);
-
/* Wait for the clocks to turn off. */
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
udelay(100);
}
@@ -1007,15 +949,13 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
u32 temp;
/* disable CPU FDI tx and PCH FDI rx */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
+ intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(dev_priv, reg);
temp &= ~(0x7 << 16);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
intel_de_posting_read(dev_priv, reg);
@@ -1027,11 +967,8 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
FDI_RX_PHASE_SYNC_POINTER_OVR);
/* still set train pattern 1 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- intel_de_write(dev_priv, reg, temp);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(dev_priv, reg);
@@ -1042,9 +979,9 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
}
- /* BPC in FDI rx is consistent with that in PIPECONF */
+ /* BPC in FDI rx is consistent with that in TRANSCONF */
temp &= ~(0x07 << 16);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
intel_de_write(dev_priv, reg, temp);
intel_de_posting_read(dev_priv, reg);
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index d636d21fa9ce..b708a62e509a 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -31,6 +31,7 @@
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_fifo_underrun.h"
+#include "intel_pch_display.h"
/**
* DOC: fifo underrun handling
@@ -509,3 +510,22 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
+
+void intel_init_fifo_underrun_reporting(struct drm_i915_private *i915,
+ struct intel_crtc *crtc,
+ bool enable)
+{
+ crtc->cpu_fifo_underrun_disabled = !enable;
+
+ /*
+	 * We track the PCH transcoder underrun reporting state
+	 * within the crtc, with the crtc for pipe A housing the underrun
+	 * reporting state for PCH transcoder A, the crtc for pipe B housing
+	 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
+ * and marking underrun reporting as disabled for the non-existing
+ * PCH transcoders B and C would prevent enabling the south
+ * error interrupt (see cpt_can_enable_serr_int()).
+ */
+ if (intel_has_pch_trancoder(i915, crtc->pipe))
+ crtc->pch_fifo_underrun_disabled = !enable;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.h b/drivers/gpu/drm/i915/display/intel_fifo_underrun.h
index 2e47d7d3c101..b00d8abebcf9 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.h
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.h
@@ -9,8 +9,11 @@
#include <linux/types.h>
struct drm_i915_private;
+struct intel_crtc;
enum pipe;
+void intel_init_fifo_underrun_reporting(struct drm_i915_private *i915,
+ struct intel_crtc *crtc, bool enable);
bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum pipe pipe, bool enable);
bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 0bc4f6b48e80..3ddfc8080ee8 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -215,41 +215,23 @@ intel_gmbus_reset(struct drm_i915_private *i915)
static void pnv_gmbus_clock_gating(struct drm_i915_private *i915,
bool enable)
{
- u32 val;
-
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- val = intel_de_read(i915, DSPCLK_GATE_D(i915));
- if (!enable)
- val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
- else
- val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(i915, DSPCLK_GATE_D(i915), val);
+ intel_de_rmw(i915, DSPCLK_GATE_D(i915), PNV_GMBUSUNIT_CLOCK_GATE_DISABLE,
+ !enable ? PNV_GMBUSUNIT_CLOCK_GATE_DISABLE : 0);
}
static void pch_gmbus_clock_gating(struct drm_i915_private *i915,
bool enable)
{
- u32 val;
-
- val = intel_de_read(i915, SOUTH_DSPCLK_GATE_D);
- if (!enable)
- val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
- else
- val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(i915, SOUTH_DSPCLK_GATE_D, val);
+ intel_de_rmw(i915, SOUTH_DSPCLK_GATE_D, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE,
+ !enable ? PCH_GMBUSUNIT_CLOCK_GATE_DISABLE : 0);
}
static void bxt_gmbus_clock_gating(struct drm_i915_private *i915,
bool enable)
{
- u32 val;
-
- val = intel_de_read(i915, GEN9_CLKGATE_DIS_4);
- if (!enable)
- val |= BXT_GMBUS_GATING_DIS;
- else
- val &= ~BXT_GMBUS_GATING_DIS;
- intel_de_write(i915, GEN9_CLKGATE_DIS_4, val);
+ intel_de_rmw(i915, GEN9_CLKGATE_DIS_4, BXT_GMBUS_GATING_DIS,
+ !enable ? BXT_GMBUS_GATING_DIS : 0);
}
static u32 get_reserved(struct intel_gmbus *bus)
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 6406fd487ee5..650232c4892b 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -23,6 +23,7 @@
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
+#include "intel_hdcp_gsc.h"
#include "intel_hdcp_regs.h"
#include "intel_pcode.h"
@@ -203,13 +204,20 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
+ struct intel_gt *gt = dev_priv->media_gt;
+ struct intel_gsc_uc *gsc = &gt->uc.gsc;
bool capable = false;
/* I915 support for HDCP2.2 */
if (!hdcp->hdcp2_supported)
return false;
- /* MEI interface is solid */
+	/* On MTL+, make sure the GSC is loaded and the proxy is set up */
+ if (intel_hdcp_gsc_cs_required(dev_priv))
+ if (!intel_uc_fw_is_running(&gsc->fw))
+ return false;
+
+ /* MEI/GSC interface is solid depending on which is used */
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
if (!dev_priv->display.hdcp.comp_added || !dev_priv->display.hdcp.master) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
@@ -943,8 +951,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
port);
- intel_de_write(dev_priv, HDCP_REP_CTL,
- intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);
+ intel_de_rmw(dev_priv, HDCP_REP_CTL, repeater_ctl, 0);
ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
if (ret) {
@@ -1143,18 +1150,18 @@ hdcp2_prepare_ake_init(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
+ ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data);
if (ret)
drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
ret);
@@ -1173,18 +1180,18 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
+ ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data,
rx_cert, paired,
ek_pub_km, msg_sz);
if (ret < 0)
@@ -1201,18 +1208,18 @@ static int hdcp2_verify_hprime(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
+ ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
@@ -1227,18 +1234,18 @@ hdcp2_store_pairing_info(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
+ ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
ret);
@@ -1254,18 +1261,18 @@ hdcp2_prepare_lc_init(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
+ ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
ret);
@@ -1281,18 +1288,18 @@ hdcp2_verify_lprime(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
+ ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
ret);
@@ -1307,18 +1314,18 @@ static int hdcp2_prepare_skey(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
+ ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
ret);
@@ -1336,20 +1343,21 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
- rep_topology,
- rep_send_ack);
+ ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev,
+ data,
+ rep_topology,
+ rep_send_ack);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm,
"Verify rep topology failed. %d\n", ret);
@@ -1365,18 +1373,18 @@ hdcp2_verify_mprime(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
+ ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
@@ -1389,18 +1397,18 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
+ ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
ret);
@@ -1409,22 +1417,22 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
return ret;
}
-static int hdcp2_close_mei_session(struct intel_connector *connector)
+static int hdcp2_close_session(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->close_hdcp_session(comp->mei_dev,
+ ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev,
&dig_port->hdcp_port_data);
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
@@ -1433,7 +1441,7 @@ static int hdcp2_close_mei_session(struct intel_connector *connector)
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
- return hdcp2_close_mei_session(connector);
+ return hdcp2_close_session(connector);
}
/* Authentication flow starts from here */
@@ -1819,12 +1827,10 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
}
if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
- LINK_AUTH_STATUS) {
+ LINK_AUTH_STATUS)
/* Link is Authenticated. Now set for Encryption */
- intel_de_write(dev_priv,
- HDCP2_CTL(dev_priv, cpu_transcoder, port),
- intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
- }
+ intel_de_rmw(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ 0, CTL_LINK_ENCRYPTION_REQ);
ret = intel_de_wait_for_set(dev_priv,
HDCP2_STATUS(dev_priv, cpu_transcoder,
@@ -1848,8 +1854,8 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
LINK_ENCRYPTION_STATUS));
- intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
- intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);
+ intel_de_rmw(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ CTL_LINK_ENCRYPTION_REQ, 0);
ret = intel_de_wait_for_clear(dev_priv,
HDCP2_STATUS(dev_priv, cpu_transcoder,
@@ -2145,8 +2151,8 @@ static int i915_hdcp_component_bind(struct device *i915_kdev,
drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- dev_priv->display.hdcp.master = (struct i915_hdcp_comp_master *)data;
- dev_priv->display.hdcp.master->mei_dev = mei_kdev;
+ dev_priv->display.hdcp.master = (struct i915_hdcp_master *)data;
+ dev_priv->display.hdcp.master->hdcp_dev = mei_kdev;
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return 0;
@@ -2163,30 +2169,30 @@ static void i915_hdcp_component_unbind(struct device *i915_kdev,
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
}
-static const struct component_ops i915_hdcp_component_ops = {
+static const struct component_ops i915_hdcp_ops = {
.bind = i915_hdcp_component_bind,
.unbind = i915_hdcp_component_unbind,
};
-static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
+static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port)
{
switch (port) {
case PORT_A:
- return MEI_DDI_A;
+ return HDCP_DDI_A;
case PORT_B ... PORT_F:
- return (enum mei_fw_ddi)port;
+ return (enum hdcp_ddi)port;
default:
- return MEI_DDI_INVALID_PORT;
+ return HDCP_DDI_INVALID_PORT;
}
}
-static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
+static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder)
{
switch (cpu_transcoder) {
case TRANSCODER_A ... TRANSCODER_D:
- return (enum mei_fw_tc)(cpu_transcoder | 0x10);
+ return (enum hdcp_transcoder)(cpu_transcoder | 0x10);
default: /* eDP, DSI TRANSCODERS are non HDCP capable */
- return MEI_INVALID_TRANSCODER;
+ return HDCP_INVALID_TRANSCODER;
}
}
@@ -2200,20 +2206,20 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
enum port port = dig_port->base.port;
if (DISPLAY_VER(dev_priv) < 12)
- data->fw_ddi = intel_get_mei_fw_ddi_index(port);
+ data->hdcp_ddi = intel_get_hdcp_ddi_index(port);
else
/*
- * As per ME FW API expectation, for GEN 12+, fw_ddi is filled
+ * As per ME FW API expectation, for GEN 12+, hdcp_ddi is filled
* with zero(INVALID PORT index).
*/
- data->fw_ddi = MEI_DDI_INVALID_PORT;
+ data->hdcp_ddi = HDCP_DDI_INVALID_PORT;
/*
- * As associated transcoder is set and modified at modeset, here fw_tc
+ * As associated transcoder is set and modified at modeset, here hdcp_transcoder
* is initialized to zero (invalid transcoder index). This will be
* retained for <Gen12 forever.
*/
- data->fw_tc = MEI_INVALID_TRANSCODER;
+ data->hdcp_transcoder = HDCP_INVALID_TRANSCODER;
data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
data->protocol = (u8)shim->protocol;
@@ -2235,6 +2241,9 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
{
+ if (intel_hdcp_gsc_cs_required(dev_priv))
+ return true;
+
if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
return false;
@@ -2256,10 +2265,14 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
dev_priv->display.hdcp.comp_added = true;
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
- ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
- I915_COMPONENT_HDCP);
+ if (intel_hdcp_gsc_cs_required(dev_priv))
+ ret = intel_hdcp_gsc_init(dev_priv);
+ else
+ ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_ops,
+ I915_COMPONENT_HDCP);
+
if (ret < 0) {
- drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
+ drm_dbg_kms(&dev_priv->drm, "Failed at fw component add(%d)\n",
ret);
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
dev_priv->display.hdcp.comp_added = false;
@@ -2350,7 +2363,8 @@ int intel_hdcp_enable(struct intel_connector *connector,
}
if (DISPLAY_VER(dev_priv) >= 12)
- dig_port->hdcp_port_data.fw_tc = intel_get_mei_fw_tc(hdcp->cpu_transcoder);
+ dig_port->hdcp_port_data.hdcp_transcoder =
+ intel_get_hdcp_transcoder(hdcp->cpu_transcoder);
/*
* Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
@@ -2485,7 +2499,10 @@ void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
dev_priv->display.hdcp.comp_added = false;
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
- component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
+ if (intel_hdcp_gsc_cs_required(dev_priv))
+ intel_hdcp_gsc_fini(dev_priv);
+ else
+ component_del(dev_priv->drm.dev, &i915_hdcp_ops);
}
void intel_hdcp_cleanup(struct intel_connector *connector)
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
new file mode 100644
index 000000000000..7e52aea6aa17
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
@@ -0,0 +1,831 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023, Intel Corporation.
+ */
+
+#include <drm/i915_hdcp_interface.h>
+
+#include "display/intel_hdcp_gsc.h"
+#include "gem/i915_gem_region.h"
+#include "gt/uc/intel_gsc_uc_heci_cmd_submit.h"
+#include "i915_drv.h"
+#include "i915_utils.h"
+
+bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) >= 14;
+}
+
+static int
+gsc_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data,
+ struct hdcp2_ake_init *ake_data)
+{
+ struct wired_cmd_initiate_hdcp2_session_in session_init_in = { { 0 } };
+ struct wired_cmd_initiate_hdcp2_session_out
+ session_init_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data || !ake_data)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ session_init_in.header.api_version = HDCP_API_VERSION;
+ session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION;
+ session_init_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ session_init_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN;
+
+ session_init_in.port.integrated_port_type = data->port_type;
+ session_init_in.port.physical_port = (u8)data->hdcp_ddi;
+ session_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+ session_init_in.protocol = data->protocol;
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_init_in,
+ sizeof(session_init_in),
+ (u8 *)&session_init_out,
+ sizeof(session_init_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (session_init_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n",
+ WIRED_INITIATE_HDCP2_SESSION,
+ session_init_out.header.status);
+ return -EIO;
+ }
+
+ ake_data->msg_id = HDCP_2_2_AKE_INIT;
+ ake_data->tx_caps = session_init_out.tx_caps;
+ memcpy(ake_data->r_tx, session_init_out.r_tx, HDCP_2_2_RTX_LEN);
+
+ return 0;
+}
+
+static int
+gsc_hdcp_verify_receiver_cert_prepare_km(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ake_send_cert *rx_cert,
+ bool *km_stored,
+ struct hdcp2_ake_no_stored_km
+ *ek_pub_km,
+ size_t *msg_sz)
+{
+ struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = { { 0 } };
+ struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ verify_rxcert_in.header.api_version = HDCP_API_VERSION;
+ verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT;
+ verify_rxcert_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ verify_rxcert_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN;
+
+ verify_rxcert_in.port.integrated_port_type = data->port_type;
+ verify_rxcert_in.port.physical_port = (u8)data->hdcp_ddi;
+ verify_rxcert_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ verify_rxcert_in.cert_rx = rx_cert->cert_rx;
+ memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN);
+ memcpy(verify_rxcert_in.rx_caps, rx_cert->rx_caps, HDCP_2_2_RXCAPS_LEN);
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_rxcert_in,
+ sizeof(verify_rxcert_in),
+ (u8 *)&verify_rxcert_out,
+ sizeof(verify_rxcert_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed: %zd\n", byte);
+ return byte;
+ }
+
+ if (verify_rxcert_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n",
+ WIRED_VERIFY_RECEIVER_CERT,
+ verify_rxcert_out.header.status);
+ return -EIO;
+ }
+
+ *km_stored = !!verify_rxcert_out.km_stored;
+ if (verify_rxcert_out.km_stored) {
+ ek_pub_km->msg_id = HDCP_2_2_AKE_STORED_KM;
+ *msg_sz = sizeof(struct hdcp2_ake_stored_km);
+ } else {
+ ek_pub_km->msg_id = HDCP_2_2_AKE_NO_STORED_KM;
+ *msg_sz = sizeof(struct hdcp2_ake_no_stored_km);
+ }
+
+ memcpy(ek_pub_km->e_kpub_km, &verify_rxcert_out.ekm_buff,
+ sizeof(verify_rxcert_out.ekm_buff));
+
+ return 0;
+}
+
+static int
+gsc_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data,
+ struct hdcp2_ake_send_hprime *rx_hprime)
+{
+ struct wired_cmd_ake_send_hprime_in send_hprime_in = { { 0 } };
+ struct wired_cmd_ake_send_hprime_out send_hprime_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data || !rx_hprime)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ send_hprime_in.header.api_version = HDCP_API_VERSION;
+ send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME;
+ send_hprime_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN;
+
+ send_hprime_in.port.integrated_port_type = data->port_type;
+ send_hprime_in.port.physical_port = (u8)data->hdcp_ddi;
+ send_hprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ memcpy(send_hprime_in.h_prime, rx_hprime->h_prime,
+ HDCP_2_2_H_PRIME_LEN);
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&send_hprime_in,
+ sizeof(send_hprime_in),
+ (u8 *)&send_hprime_out,
+ sizeof(send_hprime_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (send_hprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n",
+ WIRED_AKE_SEND_HPRIME, send_hprime_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+gsc_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
+ struct hdcp2_ake_send_pairing_info *pairing_info)
+{
+ struct wired_cmd_ake_send_pairing_info_in pairing_info_in = { { 0 } };
+ struct wired_cmd_ake_send_pairing_info_out pairing_info_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data || !pairing_info)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ pairing_info_in.header.api_version = HDCP_API_VERSION;
+ pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO;
+ pairing_info_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ pairing_info_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN;
+
+ pairing_info_in.port.integrated_port_type = data->port_type;
+ pairing_info_in.port.physical_port = (u8)data->hdcp_ddi;
+ pairing_info_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km,
+ HDCP_2_2_E_KH_KM_LEN);
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&pairing_info_in,
+ sizeof(pairing_info_in),
+ (u8 *)&pairing_info_out,
+ sizeof(pairing_info_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (pairing_info_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. Status: 0x%X\n",
+ WIRED_AKE_SEND_PAIRING_INFO,
+ pairing_info_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+gsc_hdcp_initiate_locality_check(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_lc_init *lc_init_data)
+{
+ struct wired_cmd_init_locality_check_in lc_init_in = { { 0 } };
+ struct wired_cmd_init_locality_check_out lc_init_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data || !lc_init_data)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ lc_init_in.header.api_version = HDCP_API_VERSION;
+ lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK;
+ lc_init_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN;
+
+ lc_init_in.port.integrated_port_type = data->port_type;
+ lc_init_in.port.physical_port = (u8)data->hdcp_ddi;
+ lc_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&lc_init_in, sizeof(lc_init_in),
+ (u8 *)&lc_init_out, sizeof(lc_init_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (lc_init_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. status: 0x%X\n",
+ WIRED_INIT_LOCALITY_CHECK, lc_init_out.header.status);
+ return -EIO;
+ }
+
+ lc_init_data->msg_id = HDCP_2_2_LC_INIT;
+ memcpy(lc_init_data->r_n, lc_init_out.r_n, HDCP_2_2_RN_LEN);
+
+ return 0;
+}
+
+static int
+gsc_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data,
+ struct hdcp2_lc_send_lprime *rx_lprime)
+{
+ struct wired_cmd_validate_locality_in verify_lprime_in = { { 0 } };
+ struct wired_cmd_validate_locality_out verify_lprime_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data || !rx_lprime)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ verify_lprime_in.header.api_version = HDCP_API_VERSION;
+ verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY;
+ verify_lprime_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ verify_lprime_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN;
+
+ verify_lprime_in.port.integrated_port_type = data->port_type;
+ verify_lprime_in.port.physical_port = (u8)data->hdcp_ddi;
+ verify_lprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime,
+ HDCP_2_2_L_PRIME_LEN);
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_lprime_in,
+ sizeof(verify_lprime_in),
+ (u8 *)&verify_lprime_out,
+ sizeof(verify_lprime_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (verify_lprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_VALIDATE_LOCALITY,
+ verify_lprime_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int gsc_hdcp_get_session_key(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ske_send_eks *ske_data)
+{
+ struct wired_cmd_get_session_key_in get_skey_in = { { 0 } };
+ struct wired_cmd_get_session_key_out get_skey_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data || !ske_data)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ get_skey_in.header.api_version = HDCP_API_VERSION;
+ get_skey_in.header.command_id = WIRED_GET_SESSION_KEY;
+ get_skey_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN;
+
+ get_skey_in.port.integrated_port_type = data->port_type;
+ get_skey_in.port.physical_port = (u8)data->hdcp_ddi;
+ get_skey_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&get_skey_in, sizeof(get_skey_in),
+ (u8 *)&get_skey_out, sizeof(get_skey_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (get_skey_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_GET_SESSION_KEY, get_skey_out.header.status);
+ return -EIO;
+ }
+
+ ske_data->msg_id = HDCP_2_2_SKE_SEND_EKS;
+ memcpy(ske_data->e_dkey_ks, get_skey_out.e_dkey_ks,
+ HDCP_2_2_E_DKEY_KS_LEN);
+ memcpy(ske_data->riv, get_skey_out.r_iv, HDCP_2_2_RIV_LEN);
+
+ return 0;
+}
+
+static int
+gsc_hdcp_repeater_check_flow_prepare_ack(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_rep_send_receiverid_list
+ *rep_topology,
+ struct hdcp2_rep_send_ack
+ *rep_send_ack)
+{
+ struct wired_cmd_verify_repeater_in verify_repeater_in = { { 0 } };
+ struct wired_cmd_verify_repeater_out verify_repeater_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !rep_topology || !rep_send_ack || !data)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ verify_repeater_in.header.api_version = HDCP_API_VERSION;
+ verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER;
+ verify_repeater_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ verify_repeater_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN;
+
+ verify_repeater_in.port.integrated_port_type = data->port_type;
+ verify_repeater_in.port.physical_port = (u8)data->hdcp_ddi;
+ verify_repeater_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ memcpy(verify_repeater_in.rx_info, rep_topology->rx_info,
+ HDCP_2_2_RXINFO_LEN);
+ memcpy(verify_repeater_in.seq_num_v, rep_topology->seq_num_v,
+ HDCP_2_2_SEQ_NUM_LEN);
+ memcpy(verify_repeater_in.v_prime, rep_topology->v_prime,
+ HDCP_2_2_V_PRIME_HALF_LEN);
+ memcpy(verify_repeater_in.receiver_ids, rep_topology->receiver_ids,
+ HDCP_2_2_RECEIVER_IDS_MAX_LEN);
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_repeater_in,
+ sizeof(verify_repeater_in),
+ (u8 *)&verify_repeater_out,
+ sizeof(verify_repeater_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (verify_repeater_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_VERIFY_REPEATER,
+ verify_repeater_out.header.status);
+ return -EIO;
+ }
+
+ memcpy(rep_send_ack->v, verify_repeater_out.v,
+ HDCP_2_2_V_PRIME_HALF_LEN);
+ rep_send_ack->msg_id = HDCP_2_2_REP_SEND_ACK;
+
+ return 0;
+}
+
+static int gsc_hdcp_verify_mprime(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_rep_stream_ready *stream_ready)
+{
+ struct wired_cmd_repeater_auth_stream_req_in *verify_mprime_in;
+ struct wired_cmd_repeater_auth_stream_req_out
+ verify_mprime_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+ size_t cmd_size;
+
+ if (!dev || !stream_ready || !data)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ cmd_size = struct_size(verify_mprime_in, streams, data->k);
+ if (cmd_size == SIZE_MAX)
+ return -EINVAL;
+
+ verify_mprime_in = kzalloc(cmd_size, GFP_KERNEL);
+ if (!verify_mprime_in)
+ return -ENOMEM;
+
+ verify_mprime_in->header.api_version = HDCP_API_VERSION;
+ verify_mprime_in->header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ;
+ verify_mprime_in->header.status = FW_HDCP_STATUS_SUCCESS;
+ verify_mprime_in->header.buffer_len = cmd_size - sizeof(verify_mprime_in->header);
+
+ verify_mprime_in->port.integrated_port_type = data->port_type;
+ verify_mprime_in->port.physical_port = (u8)data->hdcp_ddi;
+ verify_mprime_in->port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ memcpy(verify_mprime_in->m_prime, stream_ready->m_prime, HDCP_2_2_MPRIME_LEN);
+ drm_hdcp_cpu_to_be24(verify_mprime_in->seq_num_m, data->seq_num_m);
+
+ memcpy(verify_mprime_in->streams, data->streams,
+ array_size(data->k, sizeof(*data->streams)));
+
+ verify_mprime_in->k = cpu_to_be16(data->k);
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)verify_mprime_in, cmd_size,
+ (u8 *)&verify_mprime_out,
+ sizeof(verify_mprime_out));
+ kfree(verify_mprime_in);
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (verify_mprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_REPEATER_AUTH_STREAM_REQ,
+ verify_mprime_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int gsc_hdcp_enable_authentication(struct device *dev,
+ struct hdcp_port_data *data)
+{
+ struct wired_cmd_enable_auth_in enable_auth_in = { { 0 } };
+ struct wired_cmd_enable_auth_out enable_auth_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ enable_auth_in.header.api_version = HDCP_API_VERSION;
+ enable_auth_in.header.command_id = WIRED_ENABLE_AUTH;
+ enable_auth_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN;
+
+ enable_auth_in.port.integrated_port_type = data->port_type;
+ enable_auth_in.port.physical_port = (u8)data->hdcp_ddi;
+ enable_auth_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+ enable_auth_in.stream_type = data->streams[0].stream_type;
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&enable_auth_in,
+ sizeof(enable_auth_in),
+ (u8 *)&enable_auth_out,
+ sizeof(enable_auth_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (enable_auth_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_ENABLE_AUTH, enable_auth_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+gsc_hdcp_close_session(struct device *dev, struct hdcp_port_data *data)
+{
+ struct wired_cmd_close_session_in session_close_in = { { 0 } };
+ struct wired_cmd_close_session_out session_close_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ session_close_in.header.api_version = HDCP_API_VERSION;
+ session_close_in.header.command_id = WIRED_CLOSE_SESSION;
+ session_close_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ session_close_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN;
+
+ session_close_in.port.integrated_port_type = data->port_type;
+ session_close_in.port.physical_port = (u8)data->hdcp_ddi;
+ session_close_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_close_in,
+ sizeof(session_close_in),
+ (u8 *)&session_close_out,
+ sizeof(session_close_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (session_close_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "Session Close Failed. status: 0x%X\n",
+ session_close_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct i915_hdcp_ops gsc_hdcp_ops = {
+ .initiate_hdcp2_session = gsc_hdcp_initiate_session,
+ .verify_receiver_cert_prepare_km =
+ gsc_hdcp_verify_receiver_cert_prepare_km,
+ .verify_hprime = gsc_hdcp_verify_hprime,
+ .store_pairing_info = gsc_hdcp_store_pairing_info,
+ .initiate_locality_check = gsc_hdcp_initiate_locality_check,
+ .verify_lprime = gsc_hdcp_verify_lprime,
+ .get_session_key = gsc_hdcp_get_session_key,
+ .repeater_check_flow_prepare_ack =
+ gsc_hdcp_repeater_check_flow_prepare_ack,
+ .verify_mprime = gsc_hdcp_verify_mprime,
+ .enable_hdcp_authentication = gsc_hdcp_enable_authentication,
+ .close_hdcp_session = gsc_hdcp_close_session,
+};
+
+/* This function allocates memory for the command that we will send to the GSC CS */
+static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
+ struct intel_hdcp_gsc_message *hdcp_message)
+{
+ struct intel_gt *gt = i915->media_gt;
+ struct drm_i915_gem_object *obj = NULL;
+ struct i915_vma *vma = NULL;
+ void *cmd;
+ int err;
+
+ /* allocate object of one page for HDCP command memory and store it */
+ obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+
+ if (IS_ERR(obj)) {
+ drm_err(&i915->drm, "Failed to allocate HDCP streaming command!\n");
+ return PTR_ERR(obj);
+ }
+
+ cmd = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true));
+ if (IS_ERR(cmd)) {
+ drm_err(&i915->drm, "Failed to map gsc message page!\n");
+ err = PTR_ERR(cmd);
+ goto out_unpin;
+ }
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_unmap;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto out_unmap;
+
+ memset(cmd, 0, obj->base.size);
+
+ hdcp_message->hdcp_cmd = cmd;
+ hdcp_message->vma = vma;
+
+ return 0;
+
+out_unmap:
+ i915_gem_object_unpin_map(obj);
+out_unpin:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int intel_hdcp_gsc_hdcp2_init(struct drm_i915_private *i915)
+{
+ struct intel_hdcp_gsc_message *hdcp_message;
+ int ret;
+
+ hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL);
+
+ if (!hdcp_message)
+ return -ENOMEM;
+
+ /*
+ * NOTE: No need to lock the comp mutex here as it is already
+	 * going to be taken before this function is called
+ */
+ i915->display.hdcp.hdcp_message = hdcp_message;
+ ret = intel_hdcp_gsc_initialize_message(i915, hdcp_message);
+
+ if (ret)
+ drm_err(&i915->drm, "Could not initialize hdcp_message\n");
+
+ return ret;
+}
+
+static void intel_hdcp_gsc_free_message(struct drm_i915_private *i915)
+{
+ struct intel_hdcp_gsc_message *hdcp_message =
+ i915->display.hdcp.hdcp_message;
+
+ i915_vma_unpin_and_release(&hdcp_message->vma, I915_VMA_RELEASE_MAP);
+ kfree(hdcp_message);
+}
+
+int intel_hdcp_gsc_init(struct drm_i915_private *i915)
+{
+ struct i915_hdcp_master *data;
+ int ret;
+
+ data = kzalloc(sizeof(struct i915_hdcp_master), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ mutex_lock(&i915->display.hdcp.comp_mutex);
+ i915->display.hdcp.master = data;
+ i915->display.hdcp.master->hdcp_dev = i915->drm.dev;
+ i915->display.hdcp.master->ops = &gsc_hdcp_ops;
+ ret = intel_hdcp_gsc_hdcp2_init(i915);
+ mutex_unlock(&i915->display.hdcp.comp_mutex);
+
+ return ret;
+}
+
+void intel_hdcp_gsc_fini(struct drm_i915_private *i915)
+{
+ intel_hdcp_gsc_free_message(i915);
+ kfree(i915->display.hdcp.master);
+}
+
+static int intel_gsc_send_sync(struct drm_i915_private *i915,
+ struct intel_gsc_mtl_header *header, u64 addr,
+ size_t msg_out_len)
+{
+ struct intel_gt *gt = i915->media_gt;
+ int ret;
+
+ header->flags = 0;
+ ret = intel_gsc_uc_heci_cmd_submit_packet(&gt->uc.gsc, addr,
+ header->message_size,
+ addr,
+ msg_out_len + sizeof(*header));
+ if (ret) {
+ drm_err(&i915->drm, "failed to send gsc HDCP msg (%d)\n", ret);
+ return ret;
+ }
+
+ /*
+ * Checking validity marker for memory sanity
+ */
+ if (header->validity_marker != GSC_HECI_VALIDITY_MARKER) {
+ drm_err(&i915->drm, "invalid validity marker\n");
+ return -EINVAL;
+ }
+
+ if (header->status != 0) {
+ drm_err(&i915->drm, "header status indicates error %d\n",
+ header->status);
+ return -EINVAL;
+ }
+
+ if (header->flags & GSC_OUTFLAG_MSG_PENDING)
+ return -EAGAIN;
+
+ return 0;
+}
+
+/*
+ * This function is used for sending requests and also handles receipt of the
+ * reply messages, so no separate message-retrieval function is required. We
+ * initialize the intel_hdcp_gsc_message structure, then add the GSC CS memory
+ * header as stated in the specs, after which the normal HDCP payload follows.
+ */
+ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
+ size_t msg_in_len, u8 *msg_out,
+ size_t msg_out_len)
+{
+ struct intel_gt *gt = i915->media_gt;
+ struct intel_gsc_mtl_header *header;
+ const size_t max_msg_size = PAGE_SIZE - sizeof(*header);
+ struct intel_hdcp_gsc_message *hdcp_message;
+ u64 addr, host_session_id;
+ u32 reply_size, msg_size;
+ int ret, tries = 0;
+
+ if (!intel_uc_uses_gsc_uc(&gt->uc))
+ return -ENODEV;
+
+ if (msg_in_len > max_msg_size || msg_out_len > max_msg_size)
+ return -ENOSPC;
+
+ hdcp_message = i915->display.hdcp.hdcp_message;
+ header = hdcp_message->hdcp_cmd;
+ addr = i915_ggtt_offset(hdcp_message->vma);
+
+ msg_size = msg_in_len + sizeof(*header);
+ memset(header, 0, msg_size);
+ get_random_bytes(&host_session_id, sizeof(u64));
+ intel_gsc_uc_heci_cmd_emit_mtl_header(header, HECI_MEADDRESS_HDCP,
+ msg_size, host_session_id);
+ memcpy(hdcp_message->hdcp_cmd + sizeof(*header), msg_in, msg_in_len);
+
+ /*
+	 * Keep resending the request while the pending bit is set. There is no
+	 * need to add a message handle: we reuse the same address, so the header
+	 * location is unchanged and already contains the handle. We send the
+	 * message up to 20 times, 50 ms apart.
+ */
+ do {
+ ret = intel_gsc_send_sync(i915, header, addr, msg_out_len);
+
+ /* Only try again if gsc says so */
+ if (ret != -EAGAIN)
+ break;
+
+ msleep(50);
+
+ } while (++tries < 20);
+
+ if (ret)
+ goto err;
+
+ /* we use the same mem for the reply, so header is in the same loc */
+ reply_size = header->message_size - sizeof(*header);
+ if (reply_size > msg_out_len) {
+ drm_warn(&i915->drm, "caller with insufficient HDCP reply size %u (%d)\n",
+ reply_size, (u32)msg_out_len);
+ reply_size = msg_out_len;
+ } else if (reply_size != msg_out_len) {
+		drm_dbg_kms(&i915->drm, "caller unexpected HDCP reply size %u (%d)\n",
+ reply_size, (u32)msg_out_len);
+ }
+
+ memcpy(msg_out, hdcp_message->hdcp_cmd + sizeof(*header), msg_out_len);
+
+err:
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
new file mode 100644
index 000000000000..5cc9fd2e88f6
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_HDCP_GSC_H__
+#define __INTEL_HDCP_GSC_H__
+
+#include <linux/err.h>
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+struct intel_hdcp_gsc_message {
+ struct i915_vma *vma;
+ void *hdcp_cmd;
+};
+
+bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915);
+ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
+ size_t msg_in_len, u8 *msg_out,
+ size_t msg_out_len);
+int intel_hdcp_gsc_init(struct drm_i915_private *i915);
+void intel_hdcp_gsc_fini(struct drm_i915_private *i915);
+
+#endif /* __INTEL_HDCP_GSC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index c0ce6d3dc505..c7e9e1fbed37 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -238,15 +238,11 @@ static void g4x_read_infoframe(struct intel_encoder *encoder,
void *frame, ssize_t len)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val, *data = frame;
+ u32 *data = frame;
int i;
- val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
-
- val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(type);
-
- intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
+ intel_de_rmw(dev_priv, VIDEO_DIP_CTL,
+ VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
*data++ = intel_de_read(dev_priv, VIDEO_DIP_DATA);
@@ -314,15 +310,11 @@ static void ibx_read_infoframe(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- u32 val, *data = frame;
+ u32 *data = frame;
int i;
- val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe));
-
- val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(type);
-
- intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val);
+ intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe),
+ VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
*data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
@@ -396,15 +388,11 @@ static void cpt_read_infoframe(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- u32 val, *data = frame;
+ u32 *data = frame;
int i;
- val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe));
-
- val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(type);
-
- intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val);
+ intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe),
+ VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
*data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
@@ -472,15 +460,11 @@ static void vlv_read_infoframe(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- u32 val, *data = frame;
+ u32 *data = frame;
int i;
- val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe));
-
- val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(type);
-
- intel_de_write(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe), val);
+ intel_de_rmw(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe),
+ VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
*data++ = intel_de_read(dev_priv,
@@ -1795,7 +1779,7 @@ static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
else
max_tmds_clock = 165000;
- vbt_max_tmds_clock = intel_bios_max_tmds_clock(encoder);
+ vbt_max_tmds_clock = intel_bios_hdmi_max_tmds_clock(encoder->devdata);
if (vbt_max_tmds_clock)
max_tmds_clock = min(max_tmds_clock, vbt_max_tmds_clock);
@@ -2152,7 +2136,7 @@ bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
* Our YCbCr output is always limited range.
* crtc_state->limited_color_range only applies to RGB,
* and it must never be set for YCbCr or we risk setting
- * some conflicting bits in PIPECONF which will mess up
+ * some conflicting bits in TRANSCONF which will mess up
* the colors on the monitor.
*/
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
@@ -2240,6 +2224,25 @@ static bool intel_hdmi_is_cloned(const struct intel_crtc_state *crtc_state)
!is_power_of_2(crtc_state->uapi.encoder_mask);
}
+static bool source_supports_scrambling(struct intel_encoder *encoder)
+{
+ /*
+	 * Gen 10+ supports HDMI 2.0: the max TMDS clock is 594MHz, and
+	 * scrambling is supported.
+	 * But there seem to be cases where certain platforms that support
+	 * HDMI 2.0 have an HDMI 1.4 retimer chip, and the max TMDS clock is
+	 * capped by the VBT to less than 340MHz.
+	 *
+	 * In such cases, when an HDMI 2.0 sink is connected, it creates a
+	 * problem: the platform and the sink both support scrambling, but the
+	 * HDMI 1.4 retimer chip doesn't.
+	 *
+	 * So decide on scrambling based on the max TMDS clock, taking into
+	 * account the restrictions coming from the VBT.
+ */
+ return intel_hdmi_source_max_tmds_clock(encoder) > 340000;
+}
+
int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -2302,7 +2305,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->lane_count = 4;
- if (scdc->scrambling.supported && DISPLAY_VER(dev_priv) >= 10) {
+ if (scdc->scrambling.supported && source_supports_scrambling(encoder)) {
if (scdc->scrambling.low_rates)
pipe_config->hdmi_scrambling = true;
@@ -2852,11 +2855,12 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
enum port port = encoder->port;
u8 ddc_pin;
- ddc_pin = intel_bios_alternate_ddc_pin(encoder);
+ ddc_pin = intel_bios_hdmi_ddc_pin(encoder->devdata);
if (ddc_pin) {
drm_dbg_kms(&dev_priv->drm,
- "Using DDC pin 0x%x for port %c (VBT)\n",
- ddc_pin, port_name(port));
+ "[ENCODER:%d:%s] Using DDC pin 0x%x (VBT)\n",
+ encoder->base.base.id, encoder->base.name,
+ ddc_pin);
return ddc_pin;
}
@@ -2882,8 +2886,9 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
drm_dbg_kms(&dev_priv->drm,
- "Using DDC pin 0x%x for port %c (platform default)\n",
- ddc_pin, port_name(port));
+ "[ENCODER:%d:%s] Using DDC pin 0x%x (platform default)\n",
+ encoder->base.base.id, encoder->base.name,
+ ddc_pin);
return ddc_pin;
}
@@ -2904,7 +2909,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port)
dig_port->set_infoframes = g4x_set_infoframes;
dig_port->infoframes_enabled = g4x_infoframes_enabled;
} else if (HAS_DDI(dev_priv)) {
- if (intel_bios_is_lspcon_present(dev_priv, dig_port->base.port)) {
+ if (intel_bios_encoder_is_lspcon(dig_port->base.devdata)) {
dig_port->write_infoframe = lspcon_write_infoframe;
dig_port->read_infoframe = lspcon_read_infoframe;
dig_port->set_infoframes = lspcon_set_infoframes;
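/*
 * Illustrative sketch (not part of the patch itself): the recurring
 * conversion in the hunks above replaces open-coded read/modify/write
 * sequences with intel_de_rmw(). Assuming the usual i915 helper semantics
 * (read, drop the "clear" mask, OR in the "set" bits, write back), the two
 * halves of this hypothetical helper are equivalent.
 */
static void example_dip_index_select(struct drm_i915_private *dev_priv,
				     struct intel_crtc *crtc,
				     unsigned int type)
{
	u32 val;

	/* Open-coded form, as removed above: */
	val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe));
	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf);	/* clear DIP data offset */
	val |= g4x_infoframe_index(type);
	intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val);

	/* Equivalent single call, as added above: */
	intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe),
		     VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
}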
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 907ab7526cb4..b12900446828 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -389,6 +389,13 @@ static void i915_hotplug_work_func(struct work_struct *work)
spin_unlock_irq(&dev_priv->irq_lock);
+ /* Skip calling encoder hotplug handlers if the ignore long HPD flag is set */
+ if (dev_priv->display.hotplug.ignore_long_hpd) {
+ drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
+ mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ return;
+ }
+
drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
@@ -940,4 +947,6 @@ void intel_hpd_debugfs_register(struct drm_i915_private *i915)
i915, &i915_hpd_storm_ctl_fops);
debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root,
i915, &i915_hpd_short_storm_ctl_fops);
+ debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root,
+ &i915->display.hotplug.ignore_long_hpd);
}
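/*
 * Illustrative sketch (not part of the patch): the minimal pattern behind
 * the new i915_ignore_long_hpd knob. debugfs_create_bool() exposes a plain
 * bool that the hotplug work function tests before walking the connectors.
 * The struct and names below are stand-ins, not from the tree.
 */
#include <linux/debugfs.h>

struct example_hotplug_state {
	bool ignore_long_hpd;
};

static void example_register_knob(struct example_hotplug_state *hpd,
				  struct dentry *debugfs_root)
{
	/* Writable from userspace: echo 1 > .../example_ignore_long_hpd */
	debugfs_create_bool("example_ignore_long_hpd", 0644, debugfs_root,
			    &hpd->ignore_long_hpd);
}

static bool example_should_skip_hotplug(const struct example_hotplug_state *hpd)
{
	/* Checked at the top of the hotplug work, before per-encoder handlers. */
	return hpd->ignore_long_hpd;
}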
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 8aaaef4d7856..5863763de530 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -315,7 +315,7 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
* intel_lpe_audio_notify() - notify lpe audio event
* audio driver and i915
* @dev_priv: the i915 drm device private data
- * @pipe: pipe
+ * @cpu_transcoder: CPU transcoder
* @port: port
* @eld : ELD data
* @ls_clock: Link symbol clock in kHz
@@ -324,7 +324,7 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
* Notify lpe audio driver of eld change.
*/
void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum port port,
+ enum transcoder cpu_transcoder, enum port port,
const void *eld, int ls_clock, bool dp_output)
{
unsigned long irqflags;
@@ -344,7 +344,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
if (eld != NULL) {
memcpy(ppdata->eld, eld, HDMI_MAX_ELD_BYTES);
- ppdata->pipe = pipe;
+ ppdata->pipe = cpu_transcoder;
ppdata->ls_clock = ls_clock;
ppdata->dp_output = dp_output;
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.h b/drivers/gpu/drm/i915/display/intel_lpe_audio.h
index f848c5038714..0beecac267ae 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.h
@@ -8,15 +8,15 @@
#include <linux/types.h>
-enum pipe;
enum port;
+enum transcoder;
struct drm_i915_private;
int intel_lpe_audio_init(struct drm_i915_private *dev_priv);
void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum port port,
+ enum transcoder cpu_transcoder, enum port port,
const void *eld, int ls_clock, bool dp_output);
#endif /* __INTEL_LPE_AUDIO_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 9ff1c0b223ad..bb3b5355a0d9 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -689,7 +689,7 @@ void lspcon_resume(struct intel_digital_port *dig_port)
struct drm_i915_private *i915 = to_i915(dev);
enum drm_lspcon_mode expected_mode;
- if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
+ if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata))
return;
if (!lspcon->active) {
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index a1557d84ce0a..a504b3a7fbd5 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -49,6 +49,7 @@
#include "intel_fdi.h"
#include "intel_gmbus.h"
#include "intel_lvds.h"
+#include "intel_lvds_regs.h"
#include "intel_panel.h"
/* Private structure for the integrated LVDS support */
@@ -84,18 +85,18 @@ static struct intel_lvds_encoder *to_lvds_encoder(struct intel_encoder *encoder)
return container_of(encoder, struct intel_lvds_encoder, base);
}
-bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_lvds_port_enabled(struct drm_i915_private *i915,
i915_reg_t lvds_reg, enum pipe *pipe)
{
u32 val;
- val = intel_de_read(dev_priv, lvds_reg);
+ val = intel_de_read(i915, lvds_reg);
/* asserts want to know the pipe even if the port is disabled */
- if (HAS_PCH_CPT(dev_priv))
- *pipe = (val & LVDS_PIPE_SEL_MASK_CPT) >> LVDS_PIPE_SEL_SHIFT_CPT;
+ if (HAS_PCH_CPT(i915))
+ *pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK_CPT, val);
else
- *pipe = (val & LVDS_PIPE_SEL_MASK) >> LVDS_PIPE_SEL_SHIFT;
+ *pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK, val);
return val & LVDS_PORT_EN;
}
@@ -103,31 +104,30 @@ bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
intel_wakeref_t wakeref;
bool ret;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
- encoder->power_domain);
+ wakeref = intel_display_power_get_if_enabled(i915, encoder->power_domain);
if (!wakeref)
return false;
- ret = intel_lvds_port_enabled(dev_priv, lvds_encoder->reg, pipe);
+ ret = intel_lvds_port_enabled(i915, lvds_encoder->reg, pipe);
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+ intel_display_power_put(i915, encoder->power_domain, wakeref);
return ret;
}
static void intel_lvds_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
u32 tmp, flags = 0;
- pipe_config->output_types |= BIT(INTEL_OUTPUT_LVDS);
+ crtc_state->output_types |= BIT(INTEL_OUTPUT_LVDS);
tmp = intel_de_read(dev_priv, lvds_encoder->reg);
if (tmp & LVDS_HSYNC_POLARITY)
@@ -139,20 +139,20 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_PVSYNC;
- pipe_config->hw.adjusted_mode.flags |= flags;
+ crtc_state->hw.adjusted_mode.flags |= flags;
if (DISPLAY_VER(dev_priv) < 5)
- pipe_config->gmch_pfit.lvds_border_bits =
+ crtc_state->gmch_pfit.lvds_border_bits =
tmp & LVDS_BORDER_ENABLE;
/* gen2/3 store dither state in pfit control, needs to match */
if (DISPLAY_VER(dev_priv) < 4) {
tmp = intel_de_read(dev_priv, PFIT_CONTROL);
- pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+ crtc_state->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
}
- pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
+ crtc_state->hw.adjusted_mode.crtc_clock = crtc_state->port_clock;
}
static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
@@ -216,41 +216,44 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, PP_CONTROL(0), val);
intel_de_write(dev_priv, PP_ON_DELAYS(0),
- REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) | REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) | REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
+ REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
+ REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) |
+ REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
intel_de_write(dev_priv, PP_OFF_DELAYS(0),
- REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) | REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) |
+ REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
intel_de_write(dev_priv, PP_DIVISOR(0),
- REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1));
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
+ REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1));
}
static void intel_pre_enable_lvds(struct intel_atomic_state *state,
struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
enum pipe pipe = crtc->pipe;
u32 temp;
- if (HAS_PCH_SPLIT(dev_priv)) {
- assert_fdi_rx_pll_disabled(dev_priv, pipe);
- assert_shared_dpll_disabled(dev_priv,
- pipe_config->shared_dpll);
+ if (HAS_PCH_SPLIT(i915)) {
+ assert_fdi_rx_pll_disabled(i915, pipe);
+ assert_shared_dpll_disabled(i915, crtc_state->shared_dpll);
} else {
- assert_pll_disabled(dev_priv, pipe);
+ assert_pll_disabled(i915, pipe);
}
- intel_lvds_pps_init_hw(dev_priv, &lvds_encoder->init_pps);
+ intel_lvds_pps_init_hw(i915, &lvds_encoder->init_pps);
temp = lvds_encoder->init_lvds_val;
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(i915)) {
temp &= ~LVDS_PIPE_SEL_MASK_CPT;
temp |= LVDS_PIPE_SEL_CPT(pipe);
} else {
@@ -260,7 +263,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
/* set the corresponding LVDS_BORDER bit */
temp &= ~LVDS_BORDER_ENABLE;
- temp |= pipe_config->gmch_pfit.lvds_border_bits;
+ temp |= crtc_state->gmch_pfit.lvds_border_bits;
/*
* Set the B0-B3 data pairs corresponding to whether we're going to
@@ -283,14 +286,14 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
/*
* Set the dithering flag on LVDS as needed, note that there is no
* special lvds dither control bit on pch-split platforms, dithering is
- * only controlled through the PIPECONF reg.
+ * only controlled through the TRANSCONF reg.
*/
- if (DISPLAY_VER(dev_priv) == 4) {
+ if (DISPLAY_VER(i915) == 4) {
/*
* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels.
*/
- if (pipe_config->dither && pipe_config->pipe_bpp == 18)
+ if (crtc_state->dither && crtc_state->pipe_bpp == 18)
temp |= LVDS_ENABLE_DITHER;
else
temp &= ~LVDS_ENABLE_DITHER;
@@ -301,7 +304,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
- intel_de_write(dev_priv, lvds_encoder->reg, temp);
+ intel_de_write(i915, lvds_encoder->reg, temp);
}
/*
@@ -309,25 +312,22 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
*/
static void intel_enable_lvds(struct intel_atomic_state *state,
struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_de_write(dev_priv, lvds_encoder->reg,
- intel_de_read(dev_priv, lvds_encoder->reg) | LVDS_PORT_EN);
+ intel_de_rmw(dev_priv, lvds_encoder->reg, 0, LVDS_PORT_EN);
- intel_de_write(dev_priv, PP_CONTROL(0),
- intel_de_read(dev_priv, PP_CONTROL(0)) | PANEL_POWER_ON);
+ intel_de_rmw(dev_priv, PP_CONTROL(0), 0, PANEL_POWER_ON);
intel_de_posting_read(dev_priv, lvds_encoder->reg);
if (intel_de_wait_for_set(dev_priv, PP_STATUS(0), PP_ON, 5000))
drm_err(&dev_priv->drm,
"timed out waiting for panel to power on\n");
- intel_backlight_enable(pipe_config, conn_state);
+ intel_backlight_enable(crtc_state, conn_state);
}
static void intel_disable_lvds(struct intel_atomic_state *state,
@@ -338,14 +338,12 @@ static void intel_disable_lvds(struct intel_atomic_state *state,
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_de_write(dev_priv, PP_CONTROL(0),
- intel_de_read(dev_priv, PP_CONTROL(0)) & ~PANEL_POWER_ON);
+ intel_de_rmw(dev_priv, PP_CONTROL(0), PANEL_POWER_ON, 0);
if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_ON, 1000))
drm_err(&dev_priv->drm,
"timed out waiting for panel to power off\n");
- intel_de_write(dev_priv, lvds_encoder->reg,
- intel_de_read(dev_priv, lvds_encoder->reg) & ~LVDS_PORT_EN);
+ intel_de_rmw(dev_priv, lvds_encoder->reg, LVDS_PORT_EN, 0);
intel_de_posting_read(dev_priv, lvds_encoder->reg);
}
@@ -386,19 +384,19 @@ static void intel_lvds_shutdown(struct intel_encoder *encoder)
}
static enum drm_mode_status
-intel_lvds_mode_valid(struct drm_connector *connector,
+intel_lvds_mode_valid(struct drm_connector *_connector,
struct drm_display_mode *mode)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
const struct drm_display_mode *fixed_mode =
- intel_panel_fixed_mode(intel_connector, mode);
- int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
+ intel_panel_fixed_mode(connector, mode);
+ int max_pixclk = to_i915(connector->base.dev)->max_dotclk_freq;
enum drm_mode_status status;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- status = intel_panel_mode_valid(intel_connector, mode);
+ status = intel_panel_mode_valid(connector, mode);
if (status != MODE_OK)
return status;
@@ -408,23 +406,21 @@ intel_lvds_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
- struct intel_crtc_state *pipe_config,
+static int intel_lvds_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
- struct intel_lvds_encoder *lvds_encoder =
- to_lvds_encoder(intel_encoder);
- struct intel_connector *intel_connector =
- lvds_encoder->attached_connector;
- struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
+ struct intel_connector *connector = lvds_encoder->attached_connector;
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
unsigned int lvds_bpp;
int ret;
/* Should never happen!! */
- if (DISPLAY_VER(dev_priv) < 4 && crtc->pipe == 0) {
- drm_err(&dev_priv->drm, "Can't support LVDS on pipe A\n");
+ if (DISPLAY_VER(i915) < 4 && crtc->pipe == 0) {
+ drm_err(&i915->drm, "Can't support LVDS on pipe A\n");
return -EINVAL;
}
@@ -433,14 +429,14 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
else
lvds_bpp = 6*3;
- if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) {
- drm_dbg_kms(&dev_priv->drm,
+ if (lvds_bpp != crtc_state->pipe_bpp && !crtc_state->bw_constrained) {
+ drm_dbg_kms(&i915->drm,
"forcing display bpp (was %d) to LVDS (%d)\n",
- pipe_config->pipe_bpp, lvds_bpp);
- pipe_config->pipe_bpp = lvds_bpp;
+ crtc_state->pipe_bpp, lvds_bpp);
+ crtc_state->pipe_bpp = lvds_bpp;
}
- pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
/*
* We have timings from the BIOS for the panel, put them in
@@ -448,17 +444,17 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- ret = intel_panel_compute_config(intel_connector, adjusted_mode);
+ ret = intel_panel_compute_config(connector, adjusted_mode);
if (ret)
return ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
- if (HAS_PCH_SPLIT(dev_priv))
- pipe_config->has_pch_encoder = true;
+ if (HAS_PCH_SPLIT(i915))
+ crtc_state->has_pch_encoder = true;
- ret = intel_panel_fitting(pipe_config, conn_state);
+ ret = intel_panel_fitting(crtc_state, conn_state);
if (ret)
return ret;
@@ -474,19 +470,19 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
/*
* Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
*/
-static int intel_lvds_get_modes(struct drm_connector *connector)
+static int intel_lvds_get_modes(struct drm_connector *_connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- const struct drm_edid *fixed_edid = intel_connector->panel.fixed_edid;
+ struct intel_connector *connector = to_intel_connector(_connector);
+ const struct drm_edid *fixed_edid = connector->panel.fixed_edid;
/* Use panel fixed edid if we have one */
if (!IS_ERR_OR_NULL(fixed_edid)) {
- drm_edid_connector_update(connector, fixed_edid);
+ drm_edid_connector_update(&connector->base, fixed_edid);
- return drm_edid_connector_add_modes(connector);
+ return drm_edid_connector_add_modes(&connector->base);
}
- return intel_panel_get_modes(intel_connector);
+ return intel_panel_get_modes(connector);
}
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
@@ -585,12 +581,12 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
- .ident = "AOpen i45GMx-I",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
- DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
- },
- },
+ .ident = "AOpen i45GMx-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
+ },
+ },
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Aopen i945GTt-VFA",
@@ -607,14 +603,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
},
{
- .callback = intel_no_lvds_dmi_callback,
- .ident = "Clientron E830",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
- DMI_MATCH(DMI_PRODUCT_NAME, "E830"),
- },
- },
- {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Clientron E830",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E830"),
+ },
+ },
+ {
.callback = intel_no_lvds_dmi_callback,
.ident = "Asus EeeBox PC EB1007",
.matches = {
@@ -764,11 +760,11 @@ static const struct dmi_system_id intel_dual_link_lvds[] = {
{ } /* terminating entry */
};
-struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv)
+struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *i915)
{
struct intel_encoder *encoder;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(&i915->drm, encoder) {
if (encoder->type == INTEL_OUTPUT_LVDS)
return encoder;
}
@@ -776,24 +772,24 @@ struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv)
return NULL;
}
-bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv)
+bool intel_is_dual_link_lvds(struct drm_i915_private *i915)
{
- struct intel_encoder *encoder = intel_get_lvds_encoder(dev_priv);
+ struct intel_encoder *encoder = intel_get_lvds_encoder(i915);
return encoder && to_lvds_encoder(encoder)->is_dual_link;
}
static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
{
- struct drm_i915_private *dev_priv = to_i915(lvds_encoder->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(lvds_encoder->base.base.dev);
struct intel_connector *connector = lvds_encoder->attached_connector;
const struct drm_display_mode *fixed_mode =
intel_panel_preferred_fixed_mode(connector);
unsigned int val;
/* use the module option value if specified */
- if (dev_priv->params.lvds_channel_mode > 0)
- return dev_priv->params.lvds_channel_mode == 2;
+ if (i915->params.lvds_channel_mode > 0)
+ return i915->params.lvds_channel_mode == 2;
/* single channel LVDS is limited to 112 MHz */
if (fixed_mode->clock > 112999)
@@ -808,8 +804,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
* we need to check "the value to be set" in VBT when LVDS
* register is uninitialized.
*/
- val = intel_de_read(dev_priv, lvds_encoder->reg);
- if (HAS_PCH_CPT(dev_priv))
+ val = intel_de_read(i915, lvds_encoder->reg);
+ if (HAS_PCH_CPT(i915))
val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT);
else
val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK);
@@ -826,56 +822,54 @@ static void intel_lvds_add_properties(struct drm_connector *connector)
/**
* intel_lvds_init - setup LVDS connectors on this device
- * @dev_priv: i915 device
+ * @i915: i915 device
*
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
*/
-void intel_lvds_init(struct drm_i915_private *dev_priv)
+void intel_lvds_init(struct drm_i915_private *i915)
{
struct intel_lvds_encoder *lvds_encoder;
- struct intel_encoder *intel_encoder;
- struct intel_connector *intel_connector;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
+ struct intel_connector *connector;
const struct drm_edid *drm_edid;
+ struct intel_encoder *encoder;
i915_reg_t lvds_reg;
u32 lvds;
u8 pin;
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds)) {
- drm_WARN(&dev_priv->drm, !dev_priv->display.vbt.int_lvds_support,
+ drm_WARN(&i915->drm, !i915->display.vbt.int_lvds_support,
"Useless DMI match. Internal LVDS support disabled by VBT\n");
return;
}
- if (!dev_priv->display.vbt.int_lvds_support) {
- drm_dbg_kms(&dev_priv->drm,
+ if (!i915->display.vbt.int_lvds_support) {
+ drm_dbg_kms(&i915->drm,
"Internal LVDS support disabled by VBT\n");
return;
}
- if (HAS_PCH_SPLIT(dev_priv))
+ if (HAS_PCH_SPLIT(i915))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
- lvds = intel_de_read(dev_priv, lvds_reg);
+ lvds = intel_de_read(i915, lvds_reg);
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(i915)) {
if ((lvds & LVDS_DETECTED) == 0)
return;
}
pin = GMBUS_PIN_PANEL;
- if (!intel_bios_is_lvds_present(dev_priv, &pin)) {
+ if (!intel_bios_is_lvds_present(i915, &pin)) {
if ((lvds & LVDS_PORT_EN) == 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"LVDS is not present in VBT\n");
return;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"LVDS is not present in VBT, but enabled anyway\n");
}
@@ -883,57 +877,55 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
if (!lvds_encoder)
return;
- intel_connector = intel_connector_alloc();
- if (!intel_connector) {
+ connector = intel_connector_alloc();
+ if (!connector) {
kfree(lvds_encoder);
return;
}
- lvds_encoder->attached_connector = intel_connector;
+ lvds_encoder->attached_connector = connector;
+ encoder = &lvds_encoder->base;
- intel_encoder = &lvds_encoder->base;
- encoder = &intel_encoder->base;
- connector = &intel_connector->base;
- drm_connector_init(&dev_priv->drm, &intel_connector->base, &intel_lvds_connector_funcs,
+ drm_connector_init(&i915->drm, &connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(&dev_priv->drm, &intel_encoder->base, &intel_lvds_enc_funcs,
+ drm_encoder_init(&i915->drm, &encoder->base, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS, "LVDS");
- intel_encoder->enable = intel_enable_lvds;
- intel_encoder->pre_enable = intel_pre_enable_lvds;
- intel_encoder->compute_config = intel_lvds_compute_config;
- if (HAS_PCH_SPLIT(dev_priv)) {
- intel_encoder->disable = pch_disable_lvds;
- intel_encoder->post_disable = pch_post_disable_lvds;
+ encoder->enable = intel_enable_lvds;
+ encoder->pre_enable = intel_pre_enable_lvds;
+ encoder->compute_config = intel_lvds_compute_config;
+ if (HAS_PCH_SPLIT(i915)) {
+ encoder->disable = pch_disable_lvds;
+ encoder->post_disable = pch_post_disable_lvds;
} else {
- intel_encoder->disable = gmch_disable_lvds;
+ encoder->disable = gmch_disable_lvds;
}
- intel_encoder->get_hw_state = intel_lvds_get_hw_state;
- intel_encoder->get_config = intel_lvds_get_config;
- intel_encoder->update_pipe = intel_backlight_update;
- intel_encoder->shutdown = intel_lvds_shutdown;
- intel_connector->get_hw_state = intel_connector_get_hw_state;
-
- intel_connector_attach_encoder(intel_connector, intel_encoder);
-
- intel_encoder->type = INTEL_OUTPUT_LVDS;
- intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
- intel_encoder->port = PORT_NONE;
- intel_encoder->cloneable = 0;
- if (DISPLAY_VER(dev_priv) < 4)
- intel_encoder->pipe_mask = BIT(PIPE_B);
+ encoder->get_hw_state = intel_lvds_get_hw_state;
+ encoder->get_config = intel_lvds_get_config;
+ encoder->update_pipe = intel_backlight_update;
+ encoder->shutdown = intel_lvds_shutdown;
+ connector->get_hw_state = intel_connector_get_hw_state;
+
+ intel_connector_attach_encoder(connector, encoder);
+
+ encoder->type = INTEL_OUTPUT_LVDS;
+ encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
+ encoder->port = PORT_NONE;
+ encoder->cloneable = 0;
+ if (DISPLAY_VER(i915) < 4)
+ encoder->pipe_mask = BIT(PIPE_B);
else
- intel_encoder->pipe_mask = ~0;
+ encoder->pipe_mask = ~0;
- drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ drm_connector_helper_add(&connector->base, &intel_lvds_connector_helper_funcs);
+ connector->base.display_info.subpixel_order = SubPixelHorizontalRGB;
lvds_encoder->reg = lvds_reg;
- intel_lvds_add_properties(connector);
+ intel_lvds_add_properties(&connector->base);
- intel_lvds_pps_get_hw_state(dev_priv, &lvds_encoder->init_pps);
+ intel_lvds_pps_get_hw_state(i915, &lvds_encoder->init_pps);
lvds_encoder->init_lvds_val = lvds;
/*
@@ -948,13 +940,13 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- mutex_lock(&dev_priv->drm.mode_config.mutex);
+ mutex_lock(&i915->drm.mode_config.mutex);
if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC) {
const struct edid *edid;
/* FIXME: Make drm_get_edid_switcheroo() return drm_edid */
- edid = drm_get_edid_switcheroo(connector,
- intel_gmbus_get_adapter(dev_priv, pin));
+ edid = drm_get_edid_switcheroo(&connector->base,
+ intel_gmbus_get_adapter(i915, pin));
if (edid) {
drm_edid = drm_edid_alloc(edid, (edid->extensions + 1) * EDID_LENGTH);
kfree(edid);
@@ -962,49 +954,49 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
drm_edid = NULL;
}
} else {
- drm_edid = drm_edid_read_ddc(connector,
- intel_gmbus_get_adapter(dev_priv, pin));
+ drm_edid = drm_edid_read_ddc(&connector->base,
+ intel_gmbus_get_adapter(i915, pin));
}
if (drm_edid) {
- if (drm_edid_connector_update(connector, drm_edid) ||
- !drm_edid_connector_add_modes(connector)) {
- drm_edid_connector_update(connector, NULL);
+ if (drm_edid_connector_update(&connector->base, drm_edid) ||
+ !drm_edid_connector_add_modes(&connector->base)) {
+ drm_edid_connector_update(&connector->base, NULL);
drm_edid_free(drm_edid);
drm_edid = ERR_PTR(-EINVAL);
}
} else {
drm_edid = ERR_PTR(-ENOENT);
}
- intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL,
+ intel_bios_init_panel_late(i915, &connector->panel, NULL,
IS_ERR(drm_edid) ? NULL : drm_edid);
/* Try EDID first */
- intel_panel_add_edid_fixed_modes(intel_connector, true);
+ intel_panel_add_edid_fixed_modes(connector, true);
/* Failed to get EDID, what about VBT? */
- if (!intel_panel_preferred_fixed_mode(intel_connector))
- intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
+ if (!intel_panel_preferred_fixed_mode(connector))
+ intel_panel_add_vbt_lfp_fixed_mode(connector);
/*
* If we didn't get a fixed mode from EDID or VBT, try checking
* if the panel is already turned on. If so, assume that
* whatever is currently programmed is the correct mode.
*/
- if (!intel_panel_preferred_fixed_mode(intel_connector))
- intel_panel_add_encoder_fixed_mode(intel_connector, intel_encoder);
+ if (!intel_panel_preferred_fixed_mode(connector))
+ intel_panel_add_encoder_fixed_mode(connector, encoder);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&i915->drm.mode_config.mutex);
/* If we still don't have a mode after all that, give up. */
- if (!intel_panel_preferred_fixed_mode(intel_connector))
+ if (!intel_panel_preferred_fixed_mode(connector))
goto failed;
- intel_panel_init(intel_connector, drm_edid);
+ intel_panel_init(connector, drm_edid);
- intel_backlight_setup(intel_connector, INVALID_PIPE);
+ intel_backlight_setup(connector, INVALID_PIPE);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
- drm_dbg_kms(&dev_priv->drm, "detected %s-link lvds configuration\n",
+ drm_dbg_kms(&i915->drm, "detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single");
lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
@@ -1012,10 +1004,10 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
return;
failed:
- drm_dbg_kms(&dev_priv->drm, "No LVDS modes found, disabling.\n");
- drm_connector_cleanup(connector);
- drm_encoder_cleanup(encoder);
+ drm_dbg_kms(&i915->drm, "No LVDS modes found, disabling.\n");
+ drm_connector_cleanup(&connector->base);
+ drm_encoder_cleanup(&encoder->base);
kfree(lvds_encoder);
- intel_connector_free(intel_connector);
+ intel_connector_free(connector);
return;
}
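/*
 * Illustrative sketch (not part of the patch): the dual-link decision in
 * compute_is_dual_link_lvds() above, reduced to its ordering. The
 * lvds_channel_mode module parameter wins, then the single-channel
 * 112 MHz pixel clock limit, and only then the register/VBT value; the
 * tail of that function is outside the hunk shown, so the final step here
 * is an assumption.
 */
static bool example_is_dual_link(unsigned int module_param_mode,
				 int fixed_mode_clock_khz,
				 u32 lvds_reg_val)
{
	/* 1: explicit user override (1 = single, 2 = dual). */
	if (module_param_mode > 0)
		return module_param_mode == 2;

	/* 2: single channel LVDS tops out at 112 MHz. */
	if (fixed_mode_clock_khz > 112999)
		return true;

	/* 3: otherwise trust what the BIOS/VBT left in the LVDS register. */
	return (lvds_reg_val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}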
diff --git a/drivers/gpu/drm/i915/display/intel_lvds_regs.h b/drivers/gpu/drm/i915/display/intel_lvds_regs.h
new file mode 100644
index 000000000000..47c1832819ee
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_lvds_regs.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_LVDS_REGS_H__
+#define __INTEL_LVDS_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+/* LVDS port control */
+#define LVDS _MMIO(0x61180)
+/*
+ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+#define LVDS_PORT_EN REG_BIT(31)
+/* Selects pipe B for LVDS data. Must be set on pre-965. */
+#define LVDS_PIPE_SEL_MASK REG_BIT(30)
+#define LVDS_PIPE_SEL(pipe) REG_FIELD_PREP(LVDS_PIPE_SEL_MASK, (pipe))
+#define LVDS_PIPE_SEL_MASK_CPT REG_GENMASK(30, 29)
+#define LVDS_PIPE_SEL_CPT(pipe) REG_FIELD_PREP(LVDS_PIPE_SEL_MASK_CPT, (pipe))
+/* LVDS dithering flag on 965/g4x platform */
+#define LVDS_ENABLE_DITHER REG_BIT(25)
+/* LVDS sync polarity flags. Set to invert (i.e. negative) */
+#define LVDS_VSYNC_POLARITY REG_BIT(21)
+#define LVDS_HSYNC_POLARITY REG_BIT(20)
+
+/* Enable border for unscaled (or aspect-scaled) display */
+#define LVDS_BORDER_ENABLE REG_BIT(15)
+/*
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+#define LVDS_A0A2_CLKA_POWER_MASK REG_GENMASK(9, 8)
+#define LVDS_A0A2_CLKA_POWER_DOWN REG_FIELD_PREP(LVDS_A0A2_CLKA_POWER_MASK, 0)
+#define LVDS_A0A2_CLKA_POWER_UP REG_FIELD_PREP(LVDS_A0A2_CLKA_POWER_MASK, 3)
+/*
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+#define LVDS_A3_POWER_MASK REG_GENMASK(7, 6)
+#define LVDS_A3_POWER_DOWN REG_FIELD_PREP(LVDS_A3_POWER_MASK, 0)
+#define LVDS_A3_POWER_UP REG_FIELD_PREP(LVDS_A3_POWER_MASK, 3)
+/*
+ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+#define LVDS_CLKB_POWER_MASK REG_GENMASK(5, 4)
+#define LVDS_CLKB_POWER_DOWN REG_FIELD_PREP(LVDS_CLKB_POWER_MASK, 0)
+#define LVDS_CLKB_POWER_UP REG_FIELD_PREP(LVDS_CLKB_POWER_MASK, 3)
+/*
+ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode. The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+#define LVDS_B0B3_POWER_MASK REG_GENMASK(3, 2)
+#define LVDS_B0B3_POWER_DOWN REG_FIELD_PREP(LVDS_B0B3_POWER_MASK, 0)
+#define LVDS_B0B3_POWER_UP REG_FIELD_PREP(LVDS_B0B3_POWER_MASK, 3)
+
+#define PCH_LVDS _MMIO(0xe1180)
+#define LVDS_DETECTED REG_BIT(1)
+
+#endif /* __INTEL_LVDS_REGS_H__ */
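/*
 * Illustrative sketch (not part of the patch): how the REG_GENMASK /
 * REG_FIELD_* definitions in the new header are used elsewhere in this
 * series. The same mask serves both for encoding (PREP) and decoding (GET)
 * the pipe select field; the helper names are hypothetical.
 */
static u32 example_encode_pipe_sel_cpt(enum pipe pipe)
{
	/* e.g. PIPE_B -> value 1 placed in bits 30:29 */
	return REG_FIELD_PREP(LVDS_PIPE_SEL_MASK_CPT, pipe);
}

static enum pipe example_decode_pipe_sel_cpt(u32 lvds_reg_val)
{
	/* Inverse operation, as used in intel_lvds_port_enabled() above. */
	return REG_FIELD_GET(LVDS_PIPE_SEL_MASK_CPT, lvds_reg_val);
}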
diff --git a/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h b/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h
index 0e8248bce52d..0306ade2bc30 100644
--- a/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h
@@ -142,7 +142,9 @@
#define FIA1_BASE 0x163000
#define FIA2_BASE 0x16E000
#define FIA3_BASE 0x16F000
-#define _FIA(fia) _PICK((fia), FIA1_BASE, FIA2_BASE, FIA3_BASE)
+#define _FIA(fia) _PICK_EVEN_2RANGES((fia), 1, \
+ FIA1_BASE, FIA1_BASE,\
+ FIA2_BASE, FIA3_BASE)
#define _MMIO_FIA(fia, off) _MMIO(_FIA(fia) + (off))
/* ICL PHY DFLEX registers */
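/*
 * Illustrative sketch (not part of the patch): the old _PICK() indexed the
 * three bases directly, while _PICK_EVEN_2RANGES() expresses the same
 * mapping as a single-entry range (FIA1) followed by an evenly spaced range
 * (FIA2/FIA3). Assuming that macro behaves as its name suggests, the
 * resulting addresses are unchanged; this hypothetical helper spells out
 * the arithmetic:
 *
 *	_FIA(0) -> FIA1_BASE (0x163000)
 *	_FIA(1) -> FIA2_BASE (0x16E000)
 *	_FIA(2) -> FIA3_BASE (0x16F000)
 */
static u32 example_fia_base(unsigned int fia)
{
	return fia == 0 ? FIA1_BASE :
	       FIA2_BASE + (fia - 1) * (FIA3_BASE - FIA2_BASE);
}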
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 52cdbd4fc2fa..4558d02641fe 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -11,6 +11,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_color.h"
@@ -21,9 +22,12 @@
#include "intel_display.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
+#include "intel_dmc.h"
+#include "intel_fifo_underrun.h"
#include "intel_modeset_setup.h"
#include "intel_pch_display.h"
-#include "intel_pm.h"
+#include "intel_vblank.h"
+#include "intel_wm.h"
#include "skl_watermark.h"
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
@@ -234,12 +238,9 @@ static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (!crtc_state->hw.active && !HAS_GMCH(i915))
- return;
-
/*
- * We start out with underrun reporting disabled to avoid races.
- * For correct bookkeeping mark this on active crtcs.
+ * We start out with underrun reporting disabled on active
+ * pipes to avoid races.
*
* Also on gmch platforms we dont have any hardware bits to
* disable the underrun reporting. Which means we need to start
@@ -250,19 +251,9 @@ static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state
* No protection against concurrent access is required - at
* worst a fifo underrun happens which also sets this to false.
*/
- crtc->cpu_fifo_underrun_disabled = true;
-
- /*
- * We track the PCH trancoder underrun reporting state
- * within the crtc. With crtc for pipe A housing the underrun
- * reporting state for PCH transcoder A, crtc for pipe B housing
- * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
- * and marking underrun reporting as disabled for the non-existing
- * PCH transcoders B and C would prevent enabling the south
- * error interrupt (see cpt_can_enable_serr_int()).
- */
- if (intel_has_pch_trancoder(i915, crtc->pipe))
- crtc->pch_fifo_underrun_disabled = true;
+ intel_init_fifo_underrun_reporting(i915, crtc,
+ !crtc_state->hw.active &&
+ !HAS_GMCH(i915));
}
static void intel_sanitize_crtc(struct intel_crtc *crtc,
@@ -647,17 +638,14 @@ static void intel_early_display_was(struct drm_i915_private *i915)
* Also known as Wa_14010480278.
*/
if (IS_DISPLAY_VER(i915, 10, 12))
- intel_de_write(i915, GEN9_CLKGATE_DIS_0,
- intel_de_read(i915, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
+ intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0, DARBF_GATING_DIS);
- if (IS_HASWELL(i915)) {
- /*
- * WaRsPkgCStateDisplayPMReq:hsw
- * System hang if this isn't done before disabling all planes!
- */
- intel_de_write(i915, CHICKEN_PAR1_1,
- intel_de_read(i915, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
- }
+ /*
+ * WaRsPkgCStateDisplayPMReq:hsw
+ * System hang if this isn't done before disabling all planes!
+ */
+ if (IS_HASWELL(i915))
+ intel_de_rmw(i915, CHICKEN_PAR1_1, 0, FORCE_ARB_IDLE_PLANES);
if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) {
/* Display WA #1142:kbl,cfl,cml */
@@ -723,18 +711,7 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
intel_dpll_sanitize_state(i915);
- if (IS_G4X(i915)) {
- g4x_wm_get_hw_state(i915);
- g4x_wm_sanitize(i915);
- } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
- vlv_wm_get_hw_state(i915);
- vlv_wm_sanitize(i915);
- } else if (DISPLAY_VER(i915) >= 9) {
- skl_wm_get_hw_state(i915);
- skl_wm_sanitize(i915);
- } else if (HAS_PCH_SPLIT(i915)) {
- ilk_wm_get_hw_state(i915);
- }
+ intel_wm_get_hw_state(i915);
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
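/*
 * Illustrative sketch (not part of the patch): the per-platform watermark
 * readout dispatch removed above is presumably what intel_wm_get_hw_state()
 * now performs behind the new intel_wm.h interface. An equivalent shape,
 * reconstructed from the removed lines only; the real helper may differ.
 */
static void example_wm_get_hw_state(struct drm_i915_private *i915)
{
	if (IS_G4X(i915)) {
		g4x_wm_get_hw_state(i915);
		g4x_wm_sanitize(i915);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		vlv_wm_get_hw_state(i915);
		vlv_wm_sanitize(i915);
	} else if (DISPLAY_VER(i915) >= 9) {
		skl_wm_get_hw_state(i915);
		skl_wm_sanitize(i915);
	} else if (HAS_PCH_SPLIT(i915)) {
		ilk_wm_get_hw_state(i915);
	}
}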
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index b8dce0576512..b7973a05d022 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -1159,13 +1159,10 @@ void intel_opregion_register(struct drm_i915_private *i915)
intel_opregion_resume(i915);
}
-void intel_opregion_resume(struct drm_i915_private *i915)
+static void intel_opregion_resume_display(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = &i915->display.opregion;
- if (!opregion->header)
- return;
-
if (opregion->acpi) {
intel_didl_outputs(i915);
intel_setup_cadls(i915);
@@ -1186,18 +1183,24 @@ void intel_opregion_resume(struct drm_i915_private *i915)
/* Some platforms abuse the _DSM to enable MUX */
intel_dsm_get_bios_data_funcs_supported(i915);
-
- intel_opregion_notify_adapter(i915, PCI_D0);
}
-void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
+void intel_opregion_resume(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = &i915->display.opregion;
if (!opregion->header)
return;
- intel_opregion_notify_adapter(i915, state);
+ if (HAS_DISPLAY(i915))
+ intel_opregion_resume_display(i915);
+
+ intel_opregion_notify_adapter(i915, PCI_D0);
+}
+
+static void intel_opregion_suspend_display(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = &i915->display.opregion;
if (opregion->asle)
opregion->asle->ardy = ASLE_ARDY_NOT_READY;
@@ -1208,6 +1211,19 @@ void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
opregion->acpi->drdy = 0;
}
+void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
+{
+ struct intel_opregion *opregion = &i915->display.opregion;
+
+ if (!opregion->header)
+ return;
+
+ intel_opregion_notify_adapter(i915, state);
+
+ if (HAS_DISPLAY(i915))
+ intel_opregion_suspend_display(i915);
+}
+
void intel_opregion_unregister(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = &i915->display.opregion;
@@ -1221,6 +1237,14 @@ void intel_opregion_unregister(struct drm_i915_private *i915)
unregister_acpi_notifier(&opregion->acpi_notifier);
opregion->acpi_notifier.notifier_call = NULL;
}
+}
+
+void intel_opregion_cleanup(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = &i915->display.opregion;
+
+ if (!opregion->header)
+ return;
/* just clear all opregion memory pointers now */
memunmap(opregion->header);
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h
index d02e6696a050..fd2ea8ef0fa2 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.h
+++ b/drivers/gpu/drm/i915/display/intel_opregion.h
@@ -60,6 +60,7 @@ struct intel_opregion {
#ifdef CONFIG_ACPI
int intel_opregion_setup(struct drm_i915_private *dev_priv);
+void intel_opregion_cleanup(struct drm_i915_private *i915);
void intel_opregion_register(struct drm_i915_private *dev_priv);
void intel_opregion_unregister(struct drm_i915_private *dev_priv);
@@ -85,6 +86,10 @@ static inline int intel_opregion_setup(struct drm_i915_private *dev_priv)
return 0;
}
+static inline void intel_opregion_cleanup(struct drm_i915_private *i915)
+{
+}
+
static inline void intel_opregion_register(struct drm_i915_private *dev_priv)
{
}
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 42aa04bac261..ce2a34a25211 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -39,6 +39,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_drrs.h"
+#include "intel_lvds_regs.h"
#include "intel_panel.h"
#include "intel_quirks.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index cecc0d007cf3..22507da0b5f0 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -10,6 +10,7 @@
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_lvds.h"
+#include "intel_lvds_regs.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pps.h"
@@ -219,20 +220,20 @@ static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_s
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
- intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
- intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
- intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
- intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
- intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
- intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
- intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder)));
}
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
@@ -266,7 +267,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
reg = PCH_TRANSCONF(pipe);
val = intel_de_read(dev_priv, reg);
- pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));
+ pipeconf_val = intel_de_read(dev_priv, TRANSCONF(pipe));
if (HAS_PCH_IBX(dev_priv)) {
/* Configure frame start delay to match the CPU */
@@ -278,15 +279,15 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
* that in pipeconf reg. For HDMI we must use 8bpc
* here for both 8bpc and 12bpc.
*/
- val &= ~PIPECONF_BPC_MASK;
+ val &= ~TRANSCONF_BPC_MASK;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- val |= PIPECONF_BPC_8;
+ val |= TRANSCONF_BPC_8;
else
- val |= pipeconf_val & PIPECONF_BPC_MASK;
+ val |= pipeconf_val & TRANSCONF_BPC_MASK;
}
val &= ~TRANS_INTERLACE_MASK;
- if ((pipeconf_val & PIPECONF_INTERLACE_MASK_ILK) == PIPECONF_INTERLACE_IF_ID_ILK) {
+ if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_ILK) == TRANSCONF_INTERLACE_IF_ID_ILK) {
if (HAS_PCH_IBX(dev_priv) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
val |= TRANS_INTERLACE_LEGACY_VSYNC_IBX;
@@ -307,7 +308,6 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
- u32 val;
/* FDI relies on the transcoder */
assert_fdi_tx_disabled(dev_priv, pipe);
@@ -317,21 +317,16 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
assert_pch_ports_disabled(dev_priv, pipe);
reg = PCH_TRANSCONF(pipe);
- val = intel_de_read(dev_priv, reg);
- val &= ~TRANS_ENABLE;
- intel_de_write(dev_priv, reg, val);
+ intel_de_rmw(dev_priv, reg, TRANS_ENABLE, 0);
/* wait for PCH transcoder off, transcoder state */
if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
pipe_name(pipe));
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(dev_priv))
/* Workaround: Clear the timing override chicken bit again. */
- reg = TRANS_CHICKEN2(pipe);
- val = intel_de_read(dev_priv, reg);
- val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
- intel_de_write(dev_priv, reg, val);
- }
+ intel_de_rmw(dev_priv, TRANS_CHICKEN2(pipe),
+ TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
}
void ilk_pch_pre_enable(struct intel_atomic_state *state,
@@ -414,7 +409,7 @@ void ilk_pch_enable(struct intel_atomic_state *state,
intel_crtc_has_dp_encoder(crtc_state)) {
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
+ u32 bpc = (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) >> 5;
i915_reg_t reg = TRANS_DP_CTL(pipe);
enum port port;
@@ -456,21 +451,14 @@ void ilk_pch_post_disable(struct intel_atomic_state *state,
ilk_disable_pch_transcoder(crtc);
if (HAS_PCH_CPT(dev_priv)) {
- i915_reg_t reg;
- u32 temp;
-
/* disable TRANS_DP_CTL */
- reg = TRANS_DP_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~(TRANS_DP_OUTPUT_ENABLE |
- TRANS_DP_PORT_SEL_MASK);
- temp |= TRANS_DP_PORT_SEL_NONE;
- intel_de_write(dev_priv, reg, temp);
+ intel_de_rmw(dev_priv, TRANS_DP_CTL(pipe),
+ TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK,
+ TRANS_DP_PORT_SEL_NONE);
/* disable DPLL_SEL */
- temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
- temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
- intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
+ intel_de_rmw(dev_priv, PCH_DPLL_SEL,
+ TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe), 0);
}
ilk_fdi_pll_disable(crtc);
@@ -565,9 +553,9 @@ static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
val = TRANS_ENABLE;
- pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
+ pipeconf_val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
- if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == PIPECONF_INTERLACE_IF_ID_ILK)
+ if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_HSW) == TRANSCONF_INTERLACE_IF_ID_ILK)
val |= TRANS_INTERLACE_INTERLACED;
else
val |= TRANS_INTERLACE_PROGRESSIVE;
@@ -580,20 +568,14 @@ static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
- u32 val;
-
- val = intel_de_read(dev_priv, LPT_TRANSCONF);
- val &= ~TRANS_ENABLE;
- intel_de_write(dev_priv, LPT_TRANSCONF, val);
+ intel_de_rmw(dev_priv, LPT_TRANSCONF, TRANS_ENABLE, 0);
/* wait for PCH transcoder off, transcoder state */
if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 50))
drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
/* Workaround: clear timing override bit. */
- val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
- val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
- intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
+ intel_de_rmw(dev_priv, TRANS_CHICKEN2(PIPE_A), TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
}
void lpt_pch_enable(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 3657b2940702..f4c09cc37a5e 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -12,19 +12,13 @@
static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
{
- u32 tmp;
-
- tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
- tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
- intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
+ intel_de_rmw(dev_priv, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL);
if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS, 100))
drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
- tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
- tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
- intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
+ intel_de_rmw(dev_priv, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0);
if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 7b21438edd9b..24b5b12f7732 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -13,6 +13,7 @@
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_lvds.h"
+#include "intel_lvds_regs.h"
#include "intel_pps.h"
#include "intel_quirks.h"
@@ -1534,17 +1535,13 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
/*
* Compute the divisor for the pp clock, simply match the Bspec formula.
*/
- if (i915_mmio_reg_valid(regs.pp_div)) {
+ if (i915_mmio_reg_valid(regs.pp_div))
intel_de_write(dev_priv, regs.pp_div,
REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
- } else {
- u32 pp_ctl;
-
- pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
- pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
- pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
- intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
- }
+ else
+ intel_de_rmw(dev_priv, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK,
+ REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK,
+ DIV_ROUND_UP(seq->t11_t12, 1000)));
drm_dbg_kms(&dev_priv->drm,
"panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 7a72e15e6836..31084d95711d 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -152,7 +152,7 @@ static void psr_irq_control(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
i915_reg_t imr_reg;
- u32 mask, val;
+ u32 mask;
if (DISPLAY_VER(dev_priv) >= 12)
imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
@@ -164,10 +164,7 @@ static void psr_irq_control(struct intel_dp *intel_dp)
mask |= psr_irq_post_exit_bit_get(intel_dp) |
psr_irq_pre_entry_bit_get(intel_dp);
- val = intel_de_read(dev_priv, imr_reg);
- val &= ~psr_irq_mask_get(intel_dp);
- val |= ~mask;
- intel_de_write(dev_priv, imr_reg, val);
+ intel_de_rmw(dev_priv, imr_reg, psr_irq_mask_get(intel_dp), ~mask);
}
static void psr_event_print(struct drm_i915_private *i915,
@@ -245,8 +242,6 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
}
if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
- u32 val;
-
drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
transcoder_name(cpu_transcoder));
@@ -260,9 +255,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
* again so we don't care about unmask the interruption
* or unset irq_aux_error.
*/
- val = intel_de_read(dev_priv, imr_reg);
- val |= psr_irq_psr_error_bit_get(intel_dp);
- intel_de_write(dev_priv, imr_reg, val);
+ intel_de_rmw(dev_priv, imr_reg, 0, psr_irq_psr_error_bit_get(intel_dp));
schedule_work(&intel_dp->psr.work);
}
@@ -542,6 +535,14 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
val |= intel_psr2_get_tp_time(intel_dp);
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ if (intel_dp->psr.io_wake_lines < 9 &&
+ intel_dp->psr.fast_wake_lines < 9)
+ val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+ else
+ val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
+ }
+
/* Wa_22012278275:adl-p */
if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
static const u8 map[] = {
@@ -558,31 +559,21 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
* Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
* comments below for more information
*/
- u32 tmp, lines = 7;
-
- val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+ u32 tmp;
- tmp = map[lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
+ tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
val |= tmp;
- tmp = map[lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
+ tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
val |= tmp;
} else if (DISPLAY_VER(dev_priv) >= 12) {
- /*
- * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default
- * values from BSpec. In order to setting an optimal power
- * consumption, lower than 4k resolution mode needs to decrease
- * IO_BUFFER_WAKE and FAST_WAKE. And higher than 4K resolution
- * mode needs to increase IO_BUFFER_WAKE and FAST_WAKE.
- */
- val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
- val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
- val |= TGL_EDP_PSR2_FAST_WAKE(7);
+ val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+ val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
} else if (DISPLAY_VER(dev_priv) >= 9) {
- val |= EDP_PSR2_IO_BUFFER_WAKE(7);
- val |= EDP_PSR2_FAST_WAKE(7);
+ val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+ val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
}
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
@@ -591,12 +582,6 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
if (intel_dp->psr.psr2_sel_fetch_enabled) {
u32 tmp;
- /* Wa_1408330847 */
- if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
- DIS_RAM_BYPASS_PSR2_MAN_TRACK,
- DIS_RAM_BYPASS_PSR2_MAN_TRACK);
-
tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
@@ -637,13 +622,10 @@ static void psr2_program_idle_frames(struct intel_dp *intel_dp,
u32 idle_frames)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 val;
idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
- val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
- val &= ~EDP_PSR2_IDLE_FRAME_MASK;
- val |= idle_frames;
- intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
+ intel_de_rmw(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder),
+ EDP_PSR2_IDLE_FRAME_MASK, idle_frames);
}
static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
@@ -708,6 +690,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
{
const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
u32 exit_scanlines;
/*
@@ -724,7 +707,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
if (crtc_state->enable_psr2_sel_fetch)
return;
- if (!(dev_priv->display.dmc.allowed_dc_mask & DC_STATE_EN_DC3CO))
+ if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
return;
if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
@@ -765,13 +748,6 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
return false;
}
- /* Wa_14010254185 Wa_14010103792 */
- if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR2 sel fetch not enabled, missing the implementation of WAs\n");
- return false;
- }
-
return crtc_state->enable_psr2_sel_fetch = true;
}
@@ -842,6 +818,46 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
return true;
}
+static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
+ u8 max_wake_lines;
+
+ if (DISPLAY_VER(i915) >= 12) {
+ io_wake_time = 42;
+ /*
+		 * According to Bspec it should be 42us, but based on testing
+		 * that is not enough -> use 45 us.
+ */
+ fast_wake_time = 45;
+ max_wake_lines = 12;
+ } else {
+ io_wake_time = 50;
+ fast_wake_time = 32;
+ max_wake_lines = 8;
+ }
+
+ io_wake_lines = intel_usecs_to_scanlines(
+ &crtc_state->uapi.adjusted_mode, io_wake_time);
+ fast_wake_lines = intel_usecs_to_scanlines(
+ &crtc_state->uapi.adjusted_mode, fast_wake_time);
+
+ if (io_wake_lines > max_wake_lines ||
+ fast_wake_lines > max_wake_lines)
+ return false;
+
+ if (i915->params.psr_safest_params)
+ io_wake_lines = fast_wake_lines = max_wake_lines;
+
+	/* According to Bspec the lower limit should be set to 7 lines. */
+ intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
+ intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
+
+ return true;
+}
+
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
@@ -936,6 +952,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
+ if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
+ drm_dbg_kms(&dev_priv->drm,
+			    "PSR2 not enabled, unable to use long enough wake times\n");
+ return false;
+ }
+
if (HAS_PSR2_SEL_FETCH(dev_priv)) {
if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
!HAS_PSR_HW_TRACKING(dev_priv)) {
@@ -945,13 +967,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
}
}
- /* Wa_2209313811 */
- if (!crtc_state->enable_psr2_sel_fetch &&
- IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
- drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n");
- goto unsupported;
- }
-
if (!psr2_granularity_check(intel_dp, crtc_state)) {
drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
goto unsupported;
@@ -1071,7 +1086,7 @@ void intel_psr_get_config(struct intel_encoder *encoder,
}
if (DISPLAY_VER(dev_priv) >= 12) {
- val = intel_de_read(dev_priv, EXITLINE(intel_dp->psr.transcoder));
+ val = intel_de_read(dev_priv, TRANS_EXITLINE(intel_dp->psr.transcoder));
val &= EXITLINE_MASK;
pipe_config->dc3co_exitline = val;
}
@@ -1145,19 +1160,13 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
psr_irq_control(intel_dp);
- if (intel_dp->psr.dc3co_exitline) {
- u32 val;
-
- /*
- * TODO: if future platforms supports DC3CO in more than one
- * transcoder, EXITLINE will need to be unset when disabling PSR
- */
- val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
- val &= ~EXITLINE_MASK;
- val |= intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT;
- val |= EXITLINE_ENABLE;
- intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
- }
+ /*
+	 * TODO: if future platforms support DC3CO in more than one
+ * transcoder, EXITLINE will need to be unset when disabling PSR
+ */
+ if (intel_dp->psr.dc3co_exitline)
+ intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
+ intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
@@ -1170,13 +1179,8 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
*/
if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
IS_DISPLAY_VER(dev_priv, 12, 13)) {
- u16 vtotal, vblank;
-
- vtotal = crtc_state->uapi.adjusted_mode.crtc_vtotal -
- crtc_state->uapi.adjusted_mode.crtc_vdisplay;
- vblank = crtc_state->uapi.adjusted_mode.crtc_vblank_end -
- crtc_state->uapi.adjusted_mode.crtc_vblank_start;
- if (vblank > vtotal)
+ if (crtc_state->hw.adjusted_mode.crtc_vblank_start !=
+ crtc_state->hw.adjusted_mode.crtc_vdisplay)
intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0,
wa_16013835468_bit_get(intel_dp));
}
@@ -1199,13 +1203,6 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
ADLP_1_BASED_X_GRANULARITY);
- /* Wa_16011168373:adl-p */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv,
- TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
- TRANS_SET_CONTEXT_LATENCY_MASK,
- TRANS_SET_CONTEXT_LATENCY_VALUE(1));
-
/* Wa_16012604467:adlp,mtl[a0,b0] */
if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
intel_de_rmw(dev_priv,
@@ -1360,12 +1357,6 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
intel_psr_exit(intel_dp);
intel_psr_wait_exit_locked(intel_dp);
- /* Wa_1408330847 */
- if (intel_dp->psr.psr2_sel_fetch_enabled &&
- IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
- DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
-
/*
* Wa_16013835468
* Wa_14015648006
@@ -1376,12 +1367,6 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
wa_16013835468_bit_get(intel_dp), 0);
if (intel_dp->psr.psr2_enabled) {
- /* Wa_16011168373:adl-p */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv,
- TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
- TRANS_SET_CONTEXT_LATENCY_MASK, 0);
-
/* Wa_16012604467:adlp,mtl[a0,b0] */
if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
intel_de_rmw(dev_priv,
@@ -1547,8 +1532,8 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
}
-void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
+void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -1559,10 +1544,28 @@ void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
}
-void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int color_plane)
+void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ if (plane->id == PLANE_CURSOR)
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+ plane_state->ctl);
+ else
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+ PLANE_SEL_FETCH_CTL_ENABLE);
+}
+
+void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -1573,11 +1576,8 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
if (!crtc_state->enable_psr2_sel_fetch)
return;
- if (plane->id == PLANE_CURSOR) {
- intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
- plane_state->ctl);
+ if (plane->id == PLANE_CURSOR)
return;
- }
clip = &plane_state->psr2_sel_fetch_area;
@@ -1605,9 +1605,6 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
val = (drm_rect_height(clip) - 1) << 16;
val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
-
- intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
- PLANE_SEL_FETCH_CTL_ENABLE);
}
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
@@ -2647,3 +2644,302 @@ void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
break;
}
}
+
+static void
+psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ const char *status = "unknown";
+ u32 val, status_val;
+
+ if (intel_dp->psr.psr2_enabled) {
+ static const char * const live_status[] = {
+ "IDLE",
+ "CAPTURE",
+ "CAPTURE_FS",
+ "SLEEP",
+ "BUFON_FW",
+ "ML_UP",
+ "SU_STANDBY",
+ "FAST_SLEEP",
+ "DEEP_SLEEP",
+ "BUF_ON",
+ "TG_ON"
+ };
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_STATUS(intel_dp->psr.transcoder));
+ status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
+ if (status_val < ARRAY_SIZE(live_status))
+ status = live_status[status_val];
+ } else {
+ static const char * const live_status[] = {
+ "IDLE",
+ "SRDONACK",
+ "SRDENT",
+ "BUFOFF",
+ "BUFON",
+ "AUXACK",
+ "SRDOFFACK",
+ "SRDENT_ON",
+ };
+ val = intel_de_read(dev_priv,
+ EDP_PSR_STATUS(intel_dp->psr.transcoder));
+ status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
+ EDP_PSR_STATUS_STATE_SHIFT;
+ if (status_val < ARRAY_SIZE(live_status))
+ status = live_status[status_val];
+ }
+
+ seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
+}
+
+static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_psr *psr = &intel_dp->psr;
+ intel_wakeref_t wakeref;
+ const char *status;
+ bool enabled;
+ u32 val;
+
+ seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
+ if (psr->sink_support)
+ seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
+ seq_puts(m, "\n");
+
+ if (!psr->sink_support)
+ return 0;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ mutex_lock(&psr->lock);
+
+ if (psr->enabled)
+ status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
+ else
+ status = "disabled";
+ seq_printf(m, "PSR mode: %s\n", status);
+
+ if (!psr->enabled) {
+ seq_printf(m, "PSR sink not reliable: %s\n",
+ str_yes_no(psr->sink_not_reliable));
+
+ goto unlock;
+ }
+
+ if (psr->psr2_enabled) {
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_CTL(intel_dp->psr.transcoder));
+ enabled = val & EDP_PSR2_ENABLE;
+ } else {
+ val = intel_de_read(dev_priv,
+ EDP_PSR_CTL(intel_dp->psr.transcoder));
+ enabled = val & EDP_PSR_ENABLE;
+ }
+ seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
+ str_enabled_disabled(enabled), val);
+ psr_source_status(intel_dp, m);
+ seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
+ psr->busy_frontbuffer_bits);
+
+ /*
+	 * SKL+ Perf counter is reset to 0 every time DC state is entered
+ */
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ val = intel_de_read(dev_priv,
+ EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
+ val &= EDP_PSR_PERF_CNT_MASK;
+ seq_printf(m, "Performance counter: %u\n", val);
+ }
+
+ if (psr->debug & I915_PSR_DEBUG_IRQ) {
+ seq_printf(m, "Last attempted entry at: %lld\n",
+ psr->last_entry_attempt);
+ seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
+ }
+
+ if (psr->psr2_enabled) {
+ u32 su_frames_val[3];
+ int frame;
+
+ /*
+		 * Reading all 3 registers beforehand to minimize crossing a
+ * frame boundary between register reads
+ */
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
+ val = intel_de_read(dev_priv,
+ PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
+ su_frames_val[frame / 3] = val;
+ }
+
+ seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
+
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
+ u32 su_blocks;
+
+ su_blocks = su_frames_val[frame / 3] &
+ PSR2_SU_STATUS_MASK(frame);
+ su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
+ seq_printf(m, "%d\t%d\n", frame, su_blocks);
+ }
+
+ seq_printf(m, "PSR2 selective fetch: %s\n",
+ str_enabled_disabled(psr->psr2_sel_fetch_enabled));
+ }
+
+unlock:
+ mutex_unlock(&psr->lock);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static int i915_edp_psr_status_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct intel_dp *intel_dp = NULL;
+ struct intel_encoder *encoder;
+
+ if (!HAS_PSR(dev_priv))
+ return -ENODEV;
+
+ /* Find the first EDP which supports PSR */
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ intel_dp = enc_to_intel_dp(encoder);
+ break;
+ }
+
+ if (!intel_dp)
+ return -ENODEV;
+
+ return intel_psr_status(m, intel_dp);
+}
+DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
+
+static int
+i915_edp_psr_debug_set(void *data, u64 val)
+{
+ struct drm_i915_private *dev_priv = data;
+ struct intel_encoder *encoder;
+ intel_wakeref_t wakeref;
+ int ret = -ENODEV;
+
+ if (!HAS_PSR(dev_priv))
+ return ret;
+
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ // TODO: split to each transcoder's PSR debug state
+ ret = intel_psr_debug_set(intel_dp, val);
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ }
+
+ return ret;
+}
+
+static int
+i915_edp_psr_debug_get(void *data, u64 *val)
+{
+ struct drm_i915_private *dev_priv = data;
+ struct intel_encoder *encoder;
+
+ if (!HAS_PSR(dev_priv))
+ return -ENODEV;
+
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ // TODO: split to each transcoder's PSR debug state
+ *val = READ_ONCE(intel_dp->psr.debug);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
+ i915_edp_psr_debug_get, i915_edp_psr_debug_set,
+ "%llu\n");
+
+void intel_psr_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
+ i915, &i915_edp_psr_debug_fops);
+
+ debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
+ i915, &i915_edp_psr_status_fops);
+}
+
+static int i915_psr_sink_status_show(struct seq_file *m, void *data)
+{
+ struct intel_connector *connector = m->private;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ static const char * const sink_status[] = {
+ "inactive",
+ "transition to active, capture and display",
+ "active, display from RFB",
+ "active, capture and display on sink device timings",
+ "transition to inactive, capture and display, timing re-sync",
+ "reserved",
+ "reserved",
+ "sink internal error",
+ };
+ const char *str;
+ int ret;
+ u8 val;
+
+ if (!CAN_PSR(intel_dp)) {
+ seq_puts(m, "PSR Unsupported\n");
+ return -ENODEV;
+ }
+
+ if (connector->base.status != connector_status_connected)
+ return -ENODEV;
+
+ ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
+ if (ret != 1)
+ return ret < 0 ? ret : -EIO;
+
+ val &= DP_PSR_SINK_STATE_MASK;
+ if (val < ARRAY_SIZE(sink_status))
+ str = sink_status[val];
+ else
+ str = "unknown";
+
+ seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
+
+static int i915_psr_status_show(struct seq_file *m, void *data)
+{
+ struct intel_connector *connector = m->private;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+
+ return intel_psr_status(m, intel_dp);
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
+
+void intel_psr_connector_debugfs_add(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct dentry *root = connector->base.debugfs_entry;
+
+ if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
+ return;
+
+ debugfs_create_file("i915_psr_sink_status", 0444, root,
+ connector, &i915_psr_sink_status_fops);
+
+ if (HAS_PSR(i915))
+ debugfs_create_file("i915_psr_status", 0444, root,
+ connector, &i915_psr_status_fops);
+}
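The _compute_psr2_wake_times() hunk earlier in this file's diff converts fixed microsecond wake budgets into scanlines and rejects PSR2 when they do not fit in the line counters. Below is a minimal, self-contained sketch of that arithmetic, assuming the round-up usecs * pixel_clock(kHz) / (1000 * htotal) conversion of intel_usecs_to_scanlines(); the function and variable names are illustrative, not the driver's.

#include <stdbool.h>
#include <stdio.h>

/* Round-up microseconds-to-scanlines conversion (assumed formula). */
static int usecs_to_scanlines(int usecs, int crtc_clock_khz, int htotal)
{
	return (usecs * crtc_clock_khz + 1000 * htotal - 1) / (1000 * htotal);
}

static bool compute_psr2_wake_lines(int display_ver, int crtc_clock_khz,
				    int htotal, int *io_lines, int *fast_lines)
{
	int io_wake_time = display_ver >= 12 ? 42 : 50;
	int fast_wake_time = display_ver >= 12 ? 45 : 32;
	int max_lines = display_ver >= 12 ? 12 : 8;

	*io_lines = usecs_to_scanlines(io_wake_time, crtc_clock_khz, htotal);
	*fast_lines = usecs_to_scanlines(fast_wake_time, crtc_clock_khz, htotal);

	/* Reject PSR2 when the wake budgets cannot fit. */
	if (*io_lines > max_lines || *fast_lines > max_lines)
		return false;

	/* Bspec floor of 7 lines, as in the patch. */
	if (*io_lines < 7)
		*io_lines = 7;
	if (*fast_lines < 7)
		*fast_lines = 7;

	return true;
}

int main(void)
{
	int io, fast;

	/* Example timing: roughly 4k@60, 594 MHz pixel clock, htotal 4400. */
	if (compute_psr2_wake_lines(12, 594000, 4400, &io, &fast))
		printf("io_wake_lines=%d fast_wake_lines=%d\n", io, fast);

	return 0;
}

For this example timing the raw results (6 and 7 lines) land on or get clamped to the 7-line floor, so both fields end up at 7.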
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 2ac3a46cccc5..0b95e8aa615f 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -13,6 +13,7 @@ struct drm_connector;
struct drm_connector_state;
struct drm_i915_private;
struct intel_atomic_state;
+struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
struct intel_dp;
@@ -46,16 +47,22 @@ bool intel_psr_enabled(struct intel_dp *intel_dp);
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state);
-void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int color_plane);
-void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state);
+void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane);
+void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+
+void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
void intel_psr_pause(struct intel_dp *intel_dp);
void intel_psr_resume(struct intel_dp *intel_dp);
void intel_psr_lock(const struct intel_crtc_state *crtc_state);
void intel_psr_unlock(const struct intel_crtc_state *crtc_state);
+void intel_psr_connector_debugfs_add(struct intel_connector *connector);
+void intel_psr_debugfs_register(struct drm_i915_private *i915);
#endif /* __INTEL_PSR_H__ */
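The header now splits selective-fetch plane programming into a _noarm and an _arm step, matching the intel_psr.c hunks where position/size are written first and PLANE_SEL_FETCH_CTL is written last so the update latches as one unit. A hedged sketch of that two-phase shape over a made-up register block (none of these names are the driver's API):

#include <stdint.h>
#include <stdio.h>

struct fake_sel_fetch_regs {
	uint32_t pos;
	uint32_t size;
	uint32_t ctl;	/* self-arming: writing this latches the update */
};

/* "noarm": program everything that does not take effect on its own. */
static void sel_fetch_update_noarm(struct fake_sel_fetch_regs *regs,
				   uint32_t pos, uint32_t size)
{
	regs->pos = pos;
	regs->size = size;
}

/* "arm": write the one self-arming control register last. */
static void sel_fetch_update_arm(struct fake_sel_fetch_regs *regs, int enable)
{
	regs->ctl = enable ? 0x80000000u : 0;
}

int main(void)
{
	struct fake_sel_fetch_regs regs = { 0 };

	sel_fetch_update_noarm(&regs, 0x00100010, 0x01df031f);
	sel_fetch_update_arm(&regs, 1);
	printf("ctl=0x%08x\n", regs.ctl);

	return 0;
}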
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index c65c771f5c46..1cfb94b5cedb 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -1419,6 +1419,36 @@ static const struct intel_mpllb_state dg2_hdmi_262750 = {
REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
};
+static const struct intel_mpllb_state dg2_hdmi_267300 = {
+ .clock = 267300,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 30146) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36699),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
static const struct intel_mpllb_state dg2_hdmi_268500 = {
.clock = 268500,
.ref_control =
@@ -1509,6 +1539,36 @@ static const struct intel_mpllb_state dg2_hdmi_241500 = {
REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
};
+static const struct intel_mpllb_state dg2_hdmi_319890 = {
+ .clock = 319890,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 94) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 64094) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13631),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
static const struct intel_mpllb_state dg2_hdmi_497750 = {
.clock = 497750,
.ref_control =
@@ -1696,8 +1756,10 @@ static const struct intel_mpllb_state * const dg2_hdmi_tables[] = {
&dg2_hdmi_209800,
&dg2_hdmi_241500,
&dg2_hdmi_262750,
+ &dg2_hdmi_267300,
&dg2_hdmi_268500,
&dg2_hdmi_296703,
+ &dg2_hdmi_319890,
&dg2_hdmi_497750,
&dg2_hdmi_592000,
&dg2_hdmi_593407,
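The new dg2_hdmi_267300 and dg2_hdmi_319890 entries extend the fixed per-clock MPLLB table. A minimal sketch of the exact-match lookup such a table implies, assuming selection works by matching the requested port clock against the .clock field (names and the reduced struct are illustrative only; the real entries carry full PLL register values):

#include <stddef.h>
#include <stdio.h>

struct mpllb_state_example {
	int clock;	/* kHz */
};

static const struct mpllb_state_example hdmi_tables_example[] = {
	{ 262750 }, { 267300 }, { 268500 }, { 296703 }, { 319890 }, { 594000 },
};

static const struct mpllb_state_example *find_hdmi_mpllb_state(int clock)
{
	size_t i;

	for (i = 0; i < sizeof(hdmi_tables_example) / sizeof(hdmi_tables_example[0]); i++)
		if (hdmi_tables_example[i].clock == clock)
			return &hdmi_tables_example[i];

	return NULL;	/* no exact PLL table entry -> mode not usable */
}

int main(void)
{
	printf("267.30 MHz supported: %s\n",
	       find_hdmi_mpllb_state(267300) ? "yes" : "no");
	printf("300.00 MHz supported: %s\n",
	       find_hdmi_mpllb_state(300000) ? "yes" : "no");

	return 0;
}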
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index e6b4d24b9cd0..25034bbf1445 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -32,85 +32,20 @@
#include <linux/string_helpers.h>
-#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_color_mgmt.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include "i915_drv.h"
#include "i915_reg.h"
-#include "i915_vgpu.h"
#include "i9xx_plane.h"
#include "intel_atomic_plane.h"
-#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fb.h"
-#include "intel_frontbuffer.h"
#include "intel_sprite.h"
-#include "intel_vrr.h"
-
-int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_rect *src = &plane_state->uapi.src;
- u32 src_x, src_y, src_w, src_h, hsub, vsub;
- bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation);
-
- /*
- * FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS
- * abuses hsub/vsub so we can't use them here. But as they
- * are limited to 32bpp RGB formats we don't actually need
- * to check anything.
- */
- if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)
- return 0;
-
- /*
- * Hardware doesn't handle subpixel coordinates.
- * Adjust to (macro)pixel boundary, but be careful not to
- * increase the source viewport size, because that could
- * push the downscaling factor out of bounds.
- */
- src_x = src->x1 >> 16;
- src_w = drm_rect_width(src) >> 16;
- src_y = src->y1 >> 16;
- src_h = drm_rect_height(src) >> 16;
-
- drm_rect_init(src, src_x << 16, src_y << 16,
- src_w << 16, src_h << 16);
-
- if (fb->format->format == DRM_FORMAT_RGB565 && rotated) {
- hsub = 2;
- vsub = 2;
- } else {
- hsub = fb->format->hsub;
- vsub = fb->format->vsub;
- }
-
- if (rotated)
- hsub = vsub = max(hsub, vsub);
-
- if (src_x % hsub || src_w % hsub) {
- drm_dbg_kms(&i915->drm, "src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n",
- src_x, src_w, hsub, str_yes_no(rotated));
- return -EINVAL;
- }
-
- if (src_y % vsub || src_h % vsub) {
- drm_dbg_kms(&i915->drm, "src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n",
- src_y, src_h, vsub, str_yes_no(rotated));
- return -EINVAL;
- }
-
- return 0;
-}
static void i9xx_plane_linear_gamma(u16 gamma[8])
{
@@ -1217,7 +1152,8 @@ g4x_sprite_update_arm(struct intel_plane *plane,
}
intel_de_write_fw(dev_priv, DVSLINOFF(pipe), linear_offset);
- intel_de_write_fw(dev_priv, DVSTILEOFF(pipe), (y << 16) | x);
+ intel_de_write_fw(dev_priv, DVSTILEOFF(pipe),
+ DVS_OFFSET_Y(y) | DVS_OFFSET_X(x));
/*
* The control register self-arms if the plane was previously
@@ -1448,124 +1384,6 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
return 0;
}
-static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
-{
- return DISPLAY_VER(dev_priv) >= 9;
-}
-
-static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
- const struct drm_intel_sprite_colorkey *set)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-
- *key = *set;
-
- /*
- * We want src key enabled on the
- * sprite and not on the primary.
- */
- if (plane->id == PLANE_PRIMARY &&
- set->flags & I915_SET_COLORKEY_SOURCE)
- key->flags = 0;
-
- /*
- * On SKL+ we want dst key enabled on
- * the primary and not on the sprite.
- */
- if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
- set->flags & I915_SET_COLORKEY_DESTINATION)
- key->flags = 0;
-}
-
-int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_intel_sprite_colorkey *set = data;
- struct drm_plane *plane;
- struct drm_plane_state *plane_state;
- struct drm_atomic_state *state;
- struct drm_modeset_acquire_ctx ctx;
- int ret = 0;
-
- /* ignore the pointless "none" flag */
- set->flags &= ~I915_SET_COLORKEY_NONE;
-
- if (set->flags & ~(I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
- return -EINVAL;
-
- /* Make sure we don't try to enable both src & dest simultaneously */
- if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
- return -EINVAL;
-
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- set->flags & I915_SET_COLORKEY_DESTINATION)
- return -EINVAL;
-
- plane = drm_plane_find(dev, file_priv, set->plane_id);
- if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
- return -ENOENT;
-
- /*
- * SKL+ only plane 2 can do destination keying against plane 1.
- * Also multiple planes can't do destination keying on the same
- * pipe simultaneously.
- */
- if (DISPLAY_VER(dev_priv) >= 9 &&
- to_intel_plane(plane)->id >= PLANE_SPRITE1 &&
- set->flags & I915_SET_COLORKEY_DESTINATION)
- return -EINVAL;
-
- drm_modeset_acquire_init(&ctx, 0);
-
- state = drm_atomic_state_alloc(plane->dev);
- if (!state) {
- ret = -ENOMEM;
- goto out;
- }
- state->acquire_ctx = &ctx;
-
- while (1) {
- plane_state = drm_atomic_get_plane_state(state, plane);
- ret = PTR_ERR_OR_ZERO(plane_state);
- if (!ret)
- intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
-
- /*
- * On some platforms we have to configure
- * the dst colorkey on the primary plane.
- */
- if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
- struct intel_crtc *crtc =
- intel_crtc_for_pipe(dev_priv,
- to_intel_plane(plane)->pipe);
-
- plane_state = drm_atomic_get_plane_state(state,
- crtc->base.primary);
- ret = PTR_ERR_OR_ZERO(plane_state);
- if (!ret)
- intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
- }
-
- if (!ret)
- ret = drm_atomic_commit(state);
-
- if (ret != -EDEADLK)
- break;
-
- drm_atomic_state_clear(state);
- drm_modeset_backoff(&ctx);
- }
-
- drm_atomic_state_put(state);
-out:
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
- return ret;
-}
-
static const u32 g4x_sprite_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
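The g4x_sprite_update_arm() hunk above replaces the open-coded "(y << 16) | x" tile-offset write with DVS_OFFSET_Y()/DVS_OFFSET_X() field helpers. A small stand-alone sketch of that REG_FIELD_PREP-style packing, using simplified stand-in macros rather than the kernel's (GCC/Clang builtins assumed):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for REG_GENMASK()/REG_FIELD_PREP(): mask bits [h:l], then
 * shift a value into that field and mask it. */
#define EX_GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define EX_FIELD_PREP(mask, val) (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define EX_DVS_OFFSET_Y_MASK	EX_GENMASK(31, 16)
#define EX_DVS_OFFSET_X_MASK	EX_GENMASK(15, 0)
#define EX_DVS_OFFSET_Y(y)	EX_FIELD_PREP(EX_DVS_OFFSET_Y_MASK, (y))
#define EX_DVS_OFFSET_X(x)	EX_FIELD_PREP(EX_DVS_OFFSET_X_MASK, (x))

int main(void)
{
	/* Equivalent of the old open-coded "(y << 16) | x". */
	uint32_t val = EX_DVS_OFFSET_Y(100) | EX_DVS_OFFSET_X(64);

	printf("0x%08x\n", val);	/* 0x00640040 */
	return 0;
}

The field helpers make the register layout explicit and keep stray bits from leaking into adjacent fields, which is the point of the conversion.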
diff --git a/drivers/gpu/drm/i915/display/intel_sprite_uapi.c b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
new file mode 100644
index 000000000000..70a391083751
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_crtc.h"
+#include "intel_display_types.h"
+#include "intel_sprite_uapi.h"
+
+static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
+{
+ return DISPLAY_VER(dev_priv) >= 9;
+}
+
+static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
+ const struct drm_intel_sprite_colorkey *set)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+
+ *key = *set;
+
+ /*
+ * We want src key enabled on the
+ * sprite and not on the primary.
+ */
+ if (plane->id == PLANE_PRIMARY &&
+ set->flags & I915_SET_COLORKEY_SOURCE)
+ key->flags = 0;
+
+ /*
+ * On SKL+ we want dst key enabled on
+ * the primary and not on the sprite.
+ */
+ if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
+ set->flags & I915_SET_COLORKEY_DESTINATION)
+ key->flags = 0;
+}
+
+int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_intel_sprite_colorkey *set = data;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ struct drm_atomic_state *state;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret = 0;
+
+ /* ignore the pointless "none" flag */
+ set->flags &= ~I915_SET_COLORKEY_NONE;
+
+ if (set->flags & ~(I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+ return -EINVAL;
+
+ /* Make sure we don't try to enable both src & dest simultaneously */
+ if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+ return -EINVAL;
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ set->flags & I915_SET_COLORKEY_DESTINATION)
+ return -EINVAL;
+
+ plane = drm_plane_find(dev, file_priv, set->plane_id);
+ if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
+ return -ENOENT;
+
+ /*
+ * SKL+ only plane 2 can do destination keying against plane 1.
+ * Also multiple planes can't do destination keying on the same
+ * pipe simultaneously.
+ */
+ if (DISPLAY_VER(dev_priv) >= 9 &&
+ to_intel_plane(plane)->id >= PLANE_SPRITE1 &&
+ set->flags & I915_SET_COLORKEY_DESTINATION)
+ return -EINVAL;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ state->acquire_ctx = &ctx;
+
+ while (1) {
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (!ret)
+ intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
+
+ /*
+ * On some platforms we have to configure
+ * the dst colorkey on the primary plane.
+ */
+ if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
+ struct intel_crtc *crtc =
+ intel_crtc_for_pipe(dev_priv,
+ to_intel_plane(plane)->pipe);
+
+ plane_state = drm_atomic_get_plane_state(state,
+ crtc->base.primary);
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (!ret)
+ intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
+ }
+
+ if (!ret)
+ ret = drm_atomic_commit(state);
+
+ if (ret != -EDEADLK)
+ break;
+
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ }
+
+ drm_atomic_state_put(state);
+out:
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ return ret;
+}
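intel_sprite_set_colorkey_ioctl(), moved here as-is, uses the usual DRM modeset-lock retry loop: on -EDEADLK the atomic state is cleared, the locks are backed off, and the commit is retried. A stubbed sketch of that retry shape only; the stubs stand in for drm_atomic_commit()/drm_modeset_backoff() and are not the DRM API:

#include <stdio.h>

#define EX_EDEADLK 35

static int attempts;

static int ex_try_commit(void)
{
	/* Pretend the first attempt hits a lock inversion. */
	return attempts++ == 0 ? -EX_EDEADLK : 0;
}

static void ex_backoff(void)
{
	/* In the real loop: clear the atomic state and drop the contended
	 * locks, waiting until they can be reacquired. */
}

int main(void)
{
	int ret;

	while (1) {
		ret = ex_try_commit();
		if (ret != -EX_EDEADLK)
			break;
		ex_backoff();	/* deadlock detected: back off and retry */
	}
	printf("commit returned %d after %d attempt(s)\n", ret, attempts);

	return 0;
}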
diff --git a/drivers/gpu/drm/i915/display/intel_sprite_uapi.h b/drivers/gpu/drm/i915/display/intel_sprite_uapi.h
new file mode 100644
index 000000000000..3eb50025acaf
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sprite_uapi.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_SPRITE_UAPI_H__
+#define __INTEL_SPRITE_UAPI_H__
+
+struct drm_device;
+struct drm_file;
+
+int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+#endif /* __INTEL_SPRITE_UAPI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index f45328712bff..bd8c9df5f98f 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -5,6 +5,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power_map.h"
@@ -118,6 +119,24 @@ assert_tc_cold_blocked(struct intel_digital_port *dig_port)
drm_WARN_ON(&i915->drm, !enabled);
}
+static enum intel_display_power_domain
+tc_port_power_domain(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+
+ return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
+}
+
+static void
+assert_tc_port_power_enabled(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ drm_WARN_ON(&i915->drm,
+ !intel_display_power_is_enabled(i915, tc_port_power_domain(dig_port)));
+}
+
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
@@ -418,9 +437,9 @@ static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm,
- "Port %s: PHY in TCCOLD, assume safe mode\n",
+ "Port %s: PHY in TCCOLD, assume not owned\n",
dig_port->tc_port_name);
- return true;
+ return false;
}
return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
@@ -464,7 +483,8 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
u32 live_status_mask;
int max_lanes;
- if (!tc_phy_status_complete(dig_port)) {
+ if (!tc_phy_status_complete(dig_port) &&
+ !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port)) {
drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
dig_port->tc_port_name);
goto out_set_tbt_alt_mode;
@@ -539,62 +559,171 @@ static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
}
}
-static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
+static bool tc_phy_is_ready_and_owned(struct intel_digital_port *dig_port,
+ bool phy_is_ready, bool phy_is_owned)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- if (!tc_phy_status_complete(dig_port)) {
- drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n",
- dig_port->tc_port_name);
- return dig_port->tc_mode == TC_PORT_TBT_ALT;
- }
+ drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready);
+
+ return phy_is_ready && phy_is_owned;
+}
- /* On ADL-P the PHY complete flag is set in TBT mode as well. */
- if (IS_ALDERLAKE_P(i915) && dig_port->tc_mode == TC_PORT_TBT_ALT)
- return true;
+static bool tc_phy_is_connected(struct intel_digital_port *dig_port,
+ enum icl_port_dpll_id port_pll_type)
+{
+ struct intel_encoder *encoder = &dig_port->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ bool phy_is_ready = tc_phy_status_complete(dig_port);
+ bool phy_is_owned = tc_phy_is_owned(dig_port);
+ bool is_connected;
- if (!tc_phy_is_owned(dig_port)) {
- drm_dbg_kms(&i915->drm, "Port %s: PHY not owned\n",
- dig_port->tc_port_name);
+ if (tc_phy_is_ready_and_owned(dig_port, phy_is_ready, phy_is_owned))
+ is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY;
+ else
+ is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;
- return false;
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
+ dig_port->tc_port_name,
+ str_yes_no(is_connected),
+ str_yes_no(phy_is_ready),
+ str_yes_no(phy_is_owned),
+ port_pll_type == ICL_PORT_DPLL_DEFAULT ? "tbt" : "non-tbt");
+
+ return is_connected;
+}
+
+static void tc_phy_wait_for_ready(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ if (wait_for(tc_phy_status_complete(dig_port), 100))
+ drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n",
+ dig_port->tc_port_name);
+}
+
+static enum tc_port_mode
+hpd_mask_to_tc_mode(u32 live_status_mask)
+{
+ if (live_status_mask)
+ return fls(live_status_mask) - 1;
+
+ return TC_PORT_DISCONNECTED;
+}
+
+static enum tc_port_mode
+tc_phy_hpd_live_mode(struct intel_digital_port *dig_port)
+{
+ u32 live_status_mask = tc_port_live_status_mask(dig_port);
+
+ return hpd_mask_to_tc_mode(live_status_mask);
+}
+
+static enum tc_port_mode
+get_tc_mode_in_phy_owned_state(struct intel_digital_port *dig_port,
+ enum tc_port_mode live_mode)
+{
+ switch (live_mode) {
+ case TC_PORT_LEGACY:
+ case TC_PORT_DP_ALT:
+ return live_mode;
+ default:
+ MISSING_CASE(live_mode);
+ fallthrough;
+ case TC_PORT_TBT_ALT:
+ case TC_PORT_DISCONNECTED:
+ if (dig_port->tc_legacy_port)
+ return TC_PORT_LEGACY;
+ else
+ return TC_PORT_DP_ALT;
}
+}
- return dig_port->tc_mode == TC_PORT_DP_ALT ||
- dig_port->tc_mode == TC_PORT_LEGACY;
+static enum tc_port_mode
+get_tc_mode_in_phy_not_owned_state(struct intel_digital_port *dig_port,
+ enum tc_port_mode live_mode)
+{
+ switch (live_mode) {
+ case TC_PORT_LEGACY:
+ return TC_PORT_DISCONNECTED;
+ case TC_PORT_DP_ALT:
+ case TC_PORT_TBT_ALT:
+ return TC_PORT_TBT_ALT;
+ default:
+ MISSING_CASE(live_mode);
+ fallthrough;
+ case TC_PORT_DISCONNECTED:
+ if (dig_port->tc_legacy_port)
+ return TC_PORT_DISCONNECTED;
+ else
+ return TC_PORT_TBT_ALT;
+ }
}
static enum tc_port_mode
intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- u32 live_status_mask = tc_port_live_status_mask(dig_port);
+ enum tc_port_mode live_mode = tc_phy_hpd_live_mode(dig_port);
+ bool phy_is_ready;
+ bool phy_is_owned;
enum tc_port_mode mode;
- if (!tc_phy_is_owned(dig_port) ||
- drm_WARN_ON(&i915->drm, !tc_phy_status_complete(dig_port)))
- return TC_PORT_TBT_ALT;
+ /*
+ * For legacy ports the IOM firmware initializes the PHY during boot-up
+ * and system resume whether or not a sink is connected. Wait here for
+	 * the initialization to complete.
+ */
+ if (dig_port->tc_legacy_port)
+ tc_phy_wait_for_ready(dig_port);
- mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
- if (live_status_mask) {
- enum tc_port_mode live_mode = fls(live_status_mask) - 1;
+ phy_is_ready = tc_phy_status_complete(dig_port);
+ phy_is_owned = tc_phy_is_owned(dig_port);
- if (!drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT))
- mode = live_mode;
+ if (!tc_phy_is_ready_and_owned(dig_port, phy_is_ready, phy_is_owned)) {
+ mode = get_tc_mode_in_phy_not_owned_state(dig_port, live_mode);
+ } else {
+ drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT);
+ mode = get_tc_mode_in_phy_owned_state(dig_port, live_mode);
}
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(mode),
+ str_yes_no(phy_is_ready),
+ str_yes_no(phy_is_owned),
+ tc_port_mode_name(live_mode));
+
return mode;
}
+static enum tc_port_mode default_tc_mode(struct intel_digital_port *dig_port)
+{
+ if (dig_port->tc_legacy_port)
+ return TC_PORT_LEGACY;
+
+ return TC_PORT_TBT_ALT;
+}
+
+static enum tc_port_mode
+hpd_mask_to_target_mode(struct intel_digital_port *dig_port, u32 live_status_mask)
+{
+ enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask);
+
+ if (mode != TC_PORT_DISCONNECTED)
+ return mode;
+
+ return default_tc_mode(dig_port);
+}
+
static enum tc_port_mode
intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
{
u32 live_status_mask = tc_port_live_status_mask(dig_port);
- if (live_status_mask)
- return fls(live_status_mask) - 1;
-
- return TC_PORT_TBT_ALT;
+ return hpd_mask_to_target_mode(dig_port, live_status_mask);
}
static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
@@ -660,11 +789,24 @@ static void intel_tc_port_update_mode(struct intel_digital_port *dig_port,
tc_cold_unblock(dig_port, domain, wref);
}
-static void
-intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
- int refcount)
+static void __intel_tc_port_get_link(struct intel_digital_port *dig_port)
+{
+ dig_port->tc_link_refcount++;
+}
+
+static void __intel_tc_port_put_link(struct intel_digital_port *dig_port)
+{
+ dig_port->tc_link_refcount--;
+}
+
+static bool tc_port_is_enabled(struct intel_digital_port *dig_port)
{
- dig_port->tc_link_refcount = refcount;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ assert_tc_port_power_enabled(dig_port);
+
+ return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) &
+ DDI_BUF_CTL_ENABLE;
}
/**
@@ -679,6 +821,7 @@ void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
intel_wakeref_t tc_cold_wref;
enum intel_display_power_domain domain;
+ bool update_mode = false;
mutex_lock(&dig_port->tc_lock);
@@ -689,63 +832,105 @@ void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
tc_cold_wref = tc_cold_block(dig_port, &domain);
dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
+ /*
+ * Save the initial mode for the state check in
+ * intel_tc_port_sanitize_mode().
+ */
+ dig_port->tc_init_mode = dig_port->tc_mode;
+ if (dig_port->tc_mode != TC_PORT_DISCONNECTED)
+ dig_port->tc_lock_wakeref =
+ tc_cold_block(dig_port, &dig_port->tc_lock_power_domain);
+
+ /*
+ * The PHY needs to be connected for AUX to work during HW readout and
+ * MST topology resume, but the PHY mode can only be changed if the
+ * port is disabled.
+ *
+ * An exception is the case where BIOS leaves the PHY incorrectly
+ * disconnected on an enabled legacy port. Work around that by
+ * connecting the PHY even though the port is enabled. This doesn't
+ * cause a problem as the PHY ownership state is ignored by the
+ * IOM/TCSS firmware (only display can own the PHY in that case).
+ */
+ if (!tc_port_is_enabled(dig_port)) {
+ update_mode = true;
+ } else if (dig_port->tc_mode == TC_PORT_DISCONNECTED) {
+ drm_WARN_ON(&i915->drm, !dig_port->tc_legacy_port);
+ drm_err(&i915->drm,
+ "Port %s: PHY disconnected on enabled port, connecting it\n",
+ dig_port->tc_port_name);
+ update_mode = true;
+ }
+
+ if (update_mode)
+ intel_tc_port_update_mode(dig_port, 1, false);
+
/* Prevent changing dig_port->tc_mode until intel_tc_port_sanitize_mode() is called. */
- intel_tc_port_link_init_refcount(dig_port, 1);
- dig_port->tc_lock_wakeref = tc_cold_block(dig_port, &dig_port->tc_lock_power_domain);
+ __intel_tc_port_get_link(dig_port);
tc_cold_unblock(dig_port, domain, tc_cold_wref);
- drm_dbg_kms(&i915->drm, "Port %s: init mode (%s)\n",
- dig_port->tc_port_name,
- tc_port_mode_name(dig_port->tc_mode));
-
mutex_unlock(&dig_port->tc_lock);
}
+static bool tc_port_has_active_links(struct intel_digital_port *dig_port,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
+ int active_links = 0;
+
+ if (dig_port->dp.is_mst) {
+ /* TODO: get the PLL type for MST, once HW readout is done for it. */
+ active_links = intel_dp_mst_encoder_active_links(dig_port);
+ } else if (crtc_state && crtc_state->hw.active) {
+ pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
+ active_links = 1;
+ }
+
+ if (active_links && !tc_phy_is_connected(dig_port, pll_type))
+ drm_err(&i915->drm,
+ "Port %s: PHY disconnected with %d active link(s)\n",
+ dig_port->tc_port_name, active_links);
+
+ return active_links;
+}
+
/**
* intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
* @dig_port: digital port
+ * @crtc_state: atomic state of CRTC connected to @dig_port
*
* Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
* loading and system resume:
* If the encoder is enabled keep the TypeC mode/PHY connected state locked until
* the encoder is disabled.
* If the encoder is disabled make sure the PHY is disconnected.
+ * @crtc_state is valid if @dig_port is enabled, NULL otherwise.
*/
-void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port)
+void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_encoder *encoder = &dig_port->base;
- int active_links = 0;
mutex_lock(&dig_port->tc_lock);
- if (dig_port->dp.is_mst)
- active_links = intel_dp_mst_encoder_active_links(dig_port);
- else if (encoder->base.crtc)
- active_links = to_intel_crtc(encoder->base.crtc)->active;
-
drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount != 1);
- intel_tc_port_link_init_refcount(dig_port, active_links);
-
- if (active_links) {
- if (!icl_tc_phy_is_connected(dig_port))
- drm_dbg_kms(&i915->drm,
- "Port %s: PHY disconnected with %d active link(s)\n",
- dig_port->tc_port_name, active_links);
- } else {
+ if (!tc_port_has_active_links(dig_port, crtc_state)) {
/*
		 * TBT-alt is the default mode in any case where the PHY ownership is not
* held (regardless of the sink's connected live state), so
* we'll just switch to disconnected mode from it here without
* a note.
*/
- if (dig_port->tc_mode != TC_PORT_TBT_ALT)
+ if (dig_port->tc_init_mode != TC_PORT_TBT_ALT &&
+ dig_port->tc_init_mode != TC_PORT_DISCONNECTED)
drm_dbg_kms(&i915->drm,
"Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
dig_port->tc_port_name,
- tc_port_mode_name(dig_port->tc_mode));
+ tc_port_mode_name(dig_port->tc_init_mode));
icl_tc_phy_disconnect(dig_port);
+ __intel_tc_port_put_link(dig_port);
tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain,
fetch_and_zero(&dig_port->tc_lock_wakeref));
@@ -768,16 +953,23 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port)
* connected ports are usable, and avoids exposing to the users objects they
* can't really use.
*/
+bool intel_tc_port_connected_locked(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port));
+
+ return tc_port_live_status_mask(dig_port) & BIT(dig_port->tc_mode);
+}
+
bool intel_tc_port_connected(struct intel_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_connected;
intel_tc_port_lock(dig_port);
-
- is_connected = tc_port_live_status_mask(dig_port) &
- BIT(dig_port->tc_mode);
-
+ is_connected = intel_tc_port_connected_locked(encoder);
intel_tc_port_unlock(dig_port);
return is_connected;
@@ -857,14 +1049,14 @@ void intel_tc_port_get_link(struct intel_digital_port *dig_port,
int required_lanes)
{
__intel_tc_port_lock(dig_port, required_lanes);
- dig_port->tc_link_refcount++;
+ __intel_tc_port_get_link(dig_port);
intel_tc_port_unlock(dig_port);
}
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
intel_tc_port_lock(dig_port);
- --dig_port->tc_link_refcount;
+ __intel_tc_port_put_link(dig_port);
intel_tc_port_unlock(dig_port);
/*
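hpd_mask_to_tc_mode() and hpd_mask_to_target_mode() in the intel_tc.c hunks above map the HPD live-status bitmask to a TypeC mode: the highest set bit wins, an empty mask means disconnected, and a disconnected legacy port falls back to legacy mode while other ports fall back to TBT-alt. A sketch of that mapping; the enum values here are illustrative and do not match the driver's:

#include <stdio.h>

enum ex_tc_port_mode {
	EX_TC_PORT_DISCONNECTED = -1,
	EX_TC_PORT_TBT_ALT = 0,
	EX_TC_PORT_DP_ALT = 1,
	EX_TC_PORT_LEGACY = 2,
};

static enum ex_tc_port_mode ex_hpd_mask_to_tc_mode(unsigned int live_status_mask)
{
	if (live_status_mask)	/* fls()-style: highest set bit selects the mode */
		return (8 * (int)sizeof(live_status_mask) - 1) -
		       __builtin_clz(live_status_mask);

	return EX_TC_PORT_DISCONNECTED;
}

static enum ex_tc_port_mode ex_target_mode(unsigned int live_status_mask,
					   int is_legacy_port)
{
	enum ex_tc_port_mode mode = ex_hpd_mask_to_tc_mode(live_status_mask);

	if (mode != EX_TC_PORT_DISCONNECTED)
		return mode;

	return is_legacy_port ? EX_TC_PORT_LEGACY : EX_TC_PORT_TBT_ALT;
}

int main(void)
{
	printf("mask 0x2 -> mode %d\n", ex_target_mode(0x2, 0));	/* DP-alt */
	printf("mask 0x0 legacy -> mode %d\n", ex_target_mode(0x0, 1));	/* legacy */

	return 0;
}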
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
index d54082e2d5e8..79667d977508 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.h
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -9,6 +9,7 @@
#include <linux/mutex.h>
#include <linux/types.h>
+struct intel_crtc_state;
struct intel_digital_port;
struct intel_encoder;
@@ -17,6 +18,7 @@ bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port);
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port);
bool intel_tc_port_connected(struct intel_encoder *encoder);
+bool intel_tc_port_connected_locked(struct intel_encoder *encoder);
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
@@ -25,7 +27,8 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes);
void intel_tc_port_init_mode(struct intel_digital_port *dig_port);
-void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port);
+void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
+ const struct intel_crtc_state *crtc_state);
void intel_tc_port_lock(struct intel_digital_port *dig_port);
void intel_tc_port_unlock(struct intel_digital_port *dig_port);
void intel_tc_port_flush_work(struct intel_digital_port *dig_port);
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index b986bf075889..3b5ff84dc615 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -930,8 +930,7 @@ intel_enable_tv(struct intel_atomic_state *state,
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
intel_crtc_wait_for_next_vblank(to_intel_crtc(pipe_config->uapi.crtc));
- intel_de_write(dev_priv, TV_CTL,
- intel_de_read(dev_priv, TV_CTL) | TV_ENC_ENABLE);
+ intel_de_rmw(dev_priv, TV_CTL, 0, TV_ENC_ENABLE);
}
static void
@@ -943,8 +942,7 @@ intel_disable_tv(struct intel_atomic_state *state,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- intel_de_write(dev_priv, TV_CTL,
- intel_de_read(dev_priv, TV_CTL) & ~TV_ENC_ENABLE);
+ intel_de_rmw(dev_priv, TV_CTL, TV_ENC_ENABLE, 0);
}
static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
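Several hunks in this merge (the TV_CTL writes above, the EDP_PSR2_CTL idle-frame update, TRANS_EXITLINE) replace open-coded read/modify/write sequences with intel_de_rmw(reg, clear, set). A tiny sketch of what such a helper amounts to, over a plain variable instead of MMIO; returning the old value mirrors the common pattern but is an assumption here, not a claim about the driver's helper:

#include <stdint.h>
#include <stdio.h>

static uint32_t ex_rmw(uint32_t *reg, uint32_t clear, uint32_t set)
{
	uint32_t old = *reg;

	*reg = (old & ~clear) | set;	/* clear requested bits, then set new ones */
	return old;
}

#define EX_TV_ENC_ENABLE (1u << 31)

int main(void)
{
	uint32_t tv_ctl = 0x00000010;

	ex_rmw(&tv_ctl, 0, EX_TV_ENC_ENABLE);	/* enable: clear nothing, set the bit */
	ex_rmw(&tv_ctl, EX_TV_ENC_ENABLE, 0);	/* disable: clear the bit, set nothing */
	printf("0x%08x\n", tv_ctl);		/* back to 0x00000010 */

	return 0;
}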
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index 4c83e2320bca..f8bf9810527d 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -8,6 +8,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_vblank.h"
+#include "intel_vrr.h"
/*
* This timing diagram depicts the video signal in and
@@ -26,7 +27,7 @@
* |
* | frame start:
* | generate frame start interrupt (aka. vblank interrupt) (gmch)
- * | may be shifted forward 1-3 extra lines via PIPECONF
+ * | may be shifted forward 1-3 extra lines via TRANSCONF
* | |
* | | start of vsync:
* | | generate vsync interrupt
@@ -54,7 +55,7 @@
* Summary:
* - most events happen at the start of horizontal sync
* - frame start happens at the start of horizontal blank, 1-4 lines
- * (depending on PIPECONF settings) after the start of vblank
+ * (depending on TRANSCONF settings) after the start of vblank
* - gen3/4 pixel and frame counter are synchronized with the start
* of horizontal active on the first line of vertical active
*/
@@ -439,3 +440,94 @@ void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
wait_for_pipe_scanline_moving(crtc, true);
}
+
+static int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+
+ /*
+ * The scanline counter increments at the leading edge of hsync.
+ *
+ * On most platforms it starts counting from vtotal-1 on the
+ * first active line. That means the scanline counter value is
+ * always one less than what we would expect. Ie. just after
+ * start of vblank, which also occurs at start of hsync (on the
+ * last active line), the scanline counter will read vblank_start-1.
+ *
+ * On gen2 the scanline counter starts counting from 1 instead
+ * of vtotal-1, so we have to subtract one (or rather add vtotal-1
+ * to keep the value positive), instead of adding one.
+ *
+ * On HSW+ the behaviour of the scanline counter depends on the output
+ * type. For DP ports it behaves like most other platforms, but on HDMI
+ * there's an extra 1 line difference. So we need to add two instead of
+ * one to the value.
+ *
+ * On VLV/CHV DSI the scanline counter would appear to increment
+ * approx. 1/3 of a scanline before start of vblank. Unfortunately
+ * that means we can't tell whether we're in vblank or not while
+ * we're on that particular line. We must still set scanline_offset
+ * to 1 so that the vblank timestamps come out correct when we query
+ * the scanline counter from within the vblank interrupt handler.
+ * However if queried just before the start of vblank we'll get an
+ * answer that's slightly in the future.
+ */
+ if (DISPLAY_VER(i915) == 2) {
+ int vtotal;
+
+ vtotal = adjusted_mode->crtc_vtotal;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vtotal /= 2;
+
+ return vtotal - 1;
+ } else if (HAS_DDI(i915) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ return 2;
+ } else {
+ return 1;
+ }
+}
+
+void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct drm_display_mode adjusted_mode;
+ int vmax_vblank_start = 0;
+ unsigned long irqflags;
+
+ drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode);
+
+ if (crtc_state->vrr.enable) {
+ adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
+ adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
+ adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
+ vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
+ }
+
+ /*
+ * Belts and suspenders locking to guarantee everyone sees 100%
+ * consistent state during fastset seamless refresh rate changes.
+ *
+ * vblank_time_lock takes care of all drm_vblank.c stuff, and
+ * uncore.lock takes care of __intel_get_crtc_scanline() which
+ * may get called elsewhere as well.
+ *
+ * TODO maybe just protect everything (including
+ * __intel_get_crtc_scanline()) with vblank_time_lock?
+ * Need to audit everything to make sure it's safe.
+ */
+ spin_lock_irqsave(&i915->drm.vblank_time_lock, irqflags);
+ spin_lock(&i915->uncore.lock);
+
+ drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
+
+ crtc->vmax_vblank_start = vmax_vblank_start;
+
+ crtc->mode_flags = crtc_state->mode_flags;
+
+ crtc->scanline_offset = intel_crtc_scanline_offset(crtc_state);
+
+ spin_unlock(&i915->uncore.lock);
+ spin_unlock_irqrestore(&i915->drm.vblank_time_lock, irqflags);
+}
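intel_crtc_scanline_offset() above encodes where each platform's hardware scanline counter starts relative to the logical scanline. A hedged sketch of how such an offset is typically consumed, wrapping the corrected value modulo vtotal; the modulo step and the helper names are assumptions for illustration, not lifted from this diff:

#include <stdio.h>

static int ex_scanline_offset(int display_ver, int is_hdmi_ddi, int vtotal)
{
	if (display_ver == 2)
		return vtotal - 1;	/* gen2: counter starts from 1 */
	if (is_hdmi_ddi)
		return 2;		/* HSW+ HDMI: one extra line of lag */
	return 1;			/* everything else */
}

/* Assumed consumer: raw hardware readback plus offset, wrapped to vtotal. */
static int ex_logical_scanline(int hw_scanline, int scanline_offset, int vtotal)
{
	return (hw_scanline + scanline_offset) % vtotal;
}

int main(void)
{
	int vtotal = 1125;	/* e.g. a 1080p timing */
	int off = ex_scanline_offset(12, 0, vtotal);

	/* Just after start of vblank the counter reads vblank_start - 1. */
	printf("raw %d -> logical %d\n", 1080,
	       ex_logical_scanline(1080, off, vtotal));

	return 0;
}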
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.h b/drivers/gpu/drm/i915/display/intel_vblank.h
index c9fea2c2a990..0884db7e76ae 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.h
+++ b/drivers/gpu/drm/i915/display/intel_vblank.h
@@ -11,6 +11,7 @@
struct drm_crtc;
struct intel_crtc;
+struct intel_crtc_state;
u32 i915_get_vblank_counter(struct drm_crtc *crtc);
u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
@@ -19,5 +20,6 @@ bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc);
void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc);
+void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_VBLANK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 207b2a648d32..09b32ffdc552 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -17,6 +17,7 @@
#include "intel_dsi.h"
#include "intel_qp_tables.h"
#include "intel_vdsc.h"
+#include "intel_vdsc_regs.h"
enum ROW_INDEX_BPP {
ROW_INDEX_6BPP = 0,
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
new file mode 100644
index 000000000000..4fd883463752
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
@@ -0,0 +1,461 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_VDSC_REGS_H__
+#define __INTEL_VDSC_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+/* Display Stream Splitter Control */
+#define DSS_CTL1 _MMIO(0x67400)
+#define SPLITTER_ENABLE (1 << 31)
+#define JOINER_ENABLE (1 << 30)
+#define DUAL_LINK_MODE_INTERLEAVE (1 << 24)
+#define DUAL_LINK_MODE_FRONTBACK (0 << 24)
+#define OVERLAP_PIXELS_MASK (0xf << 16)
+#define OVERLAP_PIXELS(pixels) ((pixels) << 16)
+#define LEFT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
+#define LEFT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
+#define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0
+
+#define DSS_CTL2 _MMIO(0x67404)
+#define LEFT_BRANCH_VDSC_ENABLE (1 << 31)
+#define RIGHT_BRANCH_VDSC_ENABLE (1 << 15)
+#define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
+#define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
+
+#define _ICL_PIPE_DSS_CTL1_PB 0x78200
+#define _ICL_PIPE_DSS_CTL1_PC 0x78400
+#define ICL_PIPE_DSS_CTL1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_PIPE_DSS_CTL1_PB, \
+ _ICL_PIPE_DSS_CTL1_PC)
+#define BIG_JOINER_ENABLE (1 << 29)
+#define MASTER_BIG_JOINER_ENABLE (1 << 28)
+#define VGA_CENTERING_ENABLE (1 << 27)
+#define SPLITTER_CONFIGURATION_MASK REG_GENMASK(26, 25)
+#define SPLITTER_CONFIGURATION_2_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 0)
+#define SPLITTER_CONFIGURATION_4_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 1)
+#define UNCOMPRESSED_JOINER_MASTER (1 << 21)
+#define UNCOMPRESSED_JOINER_SLAVE (1 << 20)
+
+#define _ICL_PIPE_DSS_CTL2_PB 0x78204
+#define _ICL_PIPE_DSS_CTL2_PC 0x78404
+#define ICL_PIPE_DSS_CTL2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_PIPE_DSS_CTL2_PB, \
+ _ICL_PIPE_DSS_CTL2_PC)
+
+/* Icelake Display Stream Compression Registers */
+#define DSCA_PICTURE_PARAMETER_SET_0 _MMIO(0x6B200)
+#define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC 0x78570
+#define ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC)
+#define DSC_ALT_ICH_SEL (1 << 20)
+#define DSC_VBR_ENABLE (1 << 19)
+#define DSC_422_ENABLE (1 << 18)
+#define DSC_COLOR_SPACE_CONVERSION (1 << 17)
+#define DSC_BLOCK_PREDICTION (1 << 16)
+#define DSC_LINE_BUF_DEPTH_SHIFT 12
+#define DSC_BPC_SHIFT 8
+#define DSC_VER_MIN_SHIFT 4
+#define DSC_VER_MAJ (0x1 << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_1 _MMIO(0x6B204)
+#define DSCC_PICTURE_PARAMETER_SET_1 _MMIO(0x6BA04)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB 0x78274
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB 0x78374
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC 0x78474
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC 0x78574
+#define ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC)
+#define DSC_BPP(bpp) ((bpp) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_2 _MMIO(0x6B208)
+#define DSCC_PICTURE_PARAMETER_SET_2 _MMIO(0x6BA08)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB 0x78278
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB 0x78378
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC 0x78478
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC 0x78578
+#define ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC)
+#define DSC_PIC_WIDTH(pic_width) ((pic_width) << 16)
+#define DSC_PIC_HEIGHT(pic_height) ((pic_height) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_3 _MMIO(0x6B20C)
+#define DSCC_PICTURE_PARAMETER_SET_3 _MMIO(0x6BA0C)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB 0x7827C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB 0x7837C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC 0x7847C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC 0x7857C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC)
+#define DSC_SLICE_WIDTH(slice_width) ((slice_width) << 16)
+#define DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_4 _MMIO(0x6B210)
+#define DSCC_PICTURE_PARAMETER_SET_4 _MMIO(0x6BA10)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB 0x78280
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB 0x78380
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC 0x78480
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC 0x78580
+#define ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC)
+#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16)
+#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_5 _MMIO(0x6B214)
+#define DSCC_PICTURE_PARAMETER_SET_5 _MMIO(0x6BA14)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB 0x78284
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB 0x78384
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC 0x78484
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC 0x78584
+#define ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC)
+#define DSC_SCALE_DEC_INT(scale_dec) ((scale_dec) << 16)
+#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_6 _MMIO(0x6B218)
+#define DSCC_PICTURE_PARAMETER_SET_6 _MMIO(0x6BA18)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB 0x78288
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB 0x78388
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC 0x78488
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC 0x78588
+#define ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC)
+#define DSC_FLATNESS_MAX_QP(max_qp) ((max_qp) << 24)
+#define DSC_FLATNESS_MIN_QP(min_qp) ((min_qp) << 16)
+#define DSC_FIRST_LINE_BPG_OFFSET(offset) ((offset) << 8)
+#define DSC_INITIAL_SCALE_VALUE(value) ((value) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_7 _MMIO(0x6B21C)
+#define DSCC_PICTURE_PARAMETER_SET_7 _MMIO(0x6BA1C)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB 0x7828C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB 0x7838C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC 0x7848C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC 0x7858C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC)
+#define DSC_NFL_BPG_OFFSET(bpg_offset) ((bpg_offset) << 16)
+#define DSC_SLICE_BPG_OFFSET(bpg_offset) ((bpg_offset) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_8 _MMIO(0x6B220)
+#define DSCC_PICTURE_PARAMETER_SET_8 _MMIO(0x6BA20)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB 0x78290
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB 0x78390
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC 0x78490
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC 0x78590
+#define ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC)
+#define DSC_INITIAL_OFFSET(initial_offset) ((initial_offset) << 16)
+#define DSC_FINAL_OFFSET(final_offset) ((final_offset) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_9 _MMIO(0x6B224)
+#define DSCC_PICTURE_PARAMETER_SET_9 _MMIO(0x6BA24)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB 0x78294
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB 0x78394
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC 0x78494
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC 0x78594
+#define ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC)
+#define DSC_RC_EDGE_FACTOR(rc_edge_fact) ((rc_edge_fact) << 16)
+#define DSC_RC_MODEL_SIZE(rc_model_size) ((rc_model_size) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_10 _MMIO(0x6B228)
+#define DSCC_PICTURE_PARAMETER_SET_10 _MMIO(0x6BA28)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB 0x78298
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB 0x78398
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC 0x78498
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC 0x78598
+#define ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC)
+#define DSC_RC_TARGET_OFF_LOW(rc_tgt_off_low) ((rc_tgt_off_low) << 20)
+#define DSC_RC_TARGET_OFF_HIGH(rc_tgt_off_high) ((rc_tgt_off_high) << 16)
+#define DSC_RC_QUANT_INC_LIMIT1(lim) ((lim) << 8)
+#define DSC_RC_QUANT_INC_LIMIT0(lim) ((lim) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_11 _MMIO(0x6B22C)
+#define DSCC_PICTURE_PARAMETER_SET_11 _MMIO(0x6BA2C)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB 0x7829C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB 0x7839C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC 0x7849C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC 0x7859C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_12 _MMIO(0x6B260)
+#define DSCC_PICTURE_PARAMETER_SET_12 _MMIO(0x6BA60)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB 0x782A0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB 0x783A0
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC 0x784A0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC 0x785A0
+#define ICL_DSC0_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_13 _MMIO(0x6B264)
+#define DSCC_PICTURE_PARAMETER_SET_13 _MMIO(0x6BA64)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB 0x782A4
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB 0x783A4
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC 0x784A4
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC 0x785A4
+#define ICL_DSC0_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_14 _MMIO(0x6B268)
+#define DSCC_PICTURE_PARAMETER_SET_14 _MMIO(0x6BA68)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB 0x782A8
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB 0x783A8
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC 0x784A8
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC 0x785A8
+#define ICL_DSC0_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_15 _MMIO(0x6B26C)
+#define DSCC_PICTURE_PARAMETER_SET_15 _MMIO(0x6BA6C)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB 0x782AC
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB 0x783AC
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC 0x784AC
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC 0x785AC
+#define ICL_DSC0_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_16 _MMIO(0x6B270)
+#define DSCC_PICTURE_PARAMETER_SET_16 _MMIO(0x6BA70)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB 0x782B0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB 0x783B0
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC 0x784B0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC 0x785B0
+#define ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC)
+#define DSC_SLICE_ROW_PER_FRAME(slice_row_per_frame) ((slice_row_per_frame) << 20)
+#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16)
+#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0)
+
+/* Icelake Rate Control Buffer Threshold Registers */
+#define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230)
+#define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4)
+#define DSCC_RC_BUF_THRESH_0 _MMIO(0x6BA30)
+#define DSCC_RC_BUF_THRESH_0_UDW _MMIO(0x6BA30 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_0_PB (0x78254)
+#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB (0x78254 + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_0_PB (0x78354)
+#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB (0x78354 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_0_PC (0x78454)
+#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC (0x78454 + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_0_PC (0x78554)
+#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC (0x78554 + 4)
+#define ICL_DSC0_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_0_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_0_PC)
+#define ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC)
+#define ICL_DSC1_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_0_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_0_PC)
+#define ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC)
+
+#define DSCA_RC_BUF_THRESH_1 _MMIO(0x6B238)
+#define DSCA_RC_BUF_THRESH_1_UDW _MMIO(0x6B238 + 4)
+#define DSCC_RC_BUF_THRESH_1 _MMIO(0x6BA38)
+#define DSCC_RC_BUF_THRESH_1_UDW _MMIO(0x6BA38 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_1_PB (0x7825C)
+#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB (0x7825C + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_1_PB (0x7835C)
+#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB (0x7835C + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_1_PC (0x7845C)
+#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC (0x7845C + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_1_PC (0x7855C)
+#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC (0x7855C + 4)
+#define ICL_DSC0_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_1_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_1_PC)
+#define ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC)
+#define ICL_DSC1_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_1_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_1_PC)
+#define ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
+
+/* Icelake DSC Rate Control Range Parameter Registers */
+#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240)
+#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40)
+#define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC (0x78508 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC)
+#define RC_BPG_OFFSET_SHIFT 10
+#define RC_MAX_QP_SHIFT 5
+#define RC_MIN_QP_SHIFT 0
+
+#define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248)
+#define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48)
+#define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC)
+
+#define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250)
+#define DSCA_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6B250 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50)
+#define DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC)
+
+#define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258)
+#define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58)
+#define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC)
+
+#endif /* __INTEL_VDSC_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 5ff6aed9575e..4228f26b4c11 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -144,17 +144,11 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
* is deprecated.
*/
if (DISPLAY_VER(i915) >= 13) {
- /*
- * FIXME: Subtract Window2 delay from below value.
- *
- * Window2 specifies time required to program DSB (Window2) in
- * number of scan lines. Assuming 0 for no DSB.
- */
crtc_state->vrr.guardband =
- crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vdisplay;
+ crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vblank_start;
} else {
crtc_state->vrr.pipeline_full =
- min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay -
+ min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start -
crtc_state->framestart_delay - 1);
}
diff --git a/drivers/gpu/drm/i915/display/intel_wm.c b/drivers/gpu/drm/i915/display/intel_wm.c
new file mode 100644
index 000000000000..bb99179cd5fd
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_wm.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i9xx_wm.h"
+#include "intel_display_types.h"
+#include "intel_wm.h"
+#include "skl_watermark.h"
+
+/**
+ * intel_update_watermarks - update FIFO watermark values based on current modes
+ * @dev_priv: i915 device
+ *
+ * Calculate watermark values for the various WM regs based on current mode
+ * and plane configuration.
+ *
+ * There are several cases to deal with here:
+ * - normal (i.e. non-self-refresh)
+ * - self-refresh (SR) mode
+ * - lines are large relative to FIFO size (buffer can hold up to 2)
+ * - lines are small relative to FIFO size (buffer can hold more than 2
+ * lines), so need to account for TLB latency
+ *
+ * The normal calculation is:
+ * watermark = dotclock * bytes per pixel * latency
+ * where latency is platform & configuration dependent (we assume pessimal
+ * values here).
+ *
+ * The SR calculation is:
+ * watermark = (trunc(latency/line time)+1) * surface width *
+ * bytes per pixel
+ * where
+ * line time = htotal / dotclock
+ * surface width = hdisplay for normal plane and 64 for cursor
+ * and latency is assumed to be high, as above.
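+ *
+ * As a rough illustration (all numbers made up, not taken from any real
+ * mode): a 100 MHz dotclock at 4 bytes per pixel with an assumed 10 us
+ * latency gives a normal watermark of 100,000,000 * 4 * 0.00001 = 4000
+ * bytes, which is then rounded up and padded as described below.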
+ *
+ * The final value programmed to the register should always be rounded up,
+ * and include an extra 2 entries to account for clock crossings.
+ *
+ * We don't use the sprite, so we can ignore that. And on Crestline we have
+ * to set the non-SR watermarks to 8.
+ */
+void intel_update_watermarks(struct drm_i915_private *i915)
+{
+ if (i915->display.funcs.wm->update_wm)
+ i915->display.funcs.wm->update_wm(i915);
+}
+
+int intel_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (i915->display.funcs.wm->compute_pipe_wm)
+ return i915->display.funcs.wm->compute_pipe_wm(state, crtc);
+
+ return 0;
+}
+
+int intel_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (!i915->display.funcs.wm->compute_intermediate_wm)
+ return 0;
+
+ if (drm_WARN_ON(&i915->drm, !i915->display.funcs.wm->compute_pipe_wm))
+ return 0;
+
+ return i915->display.funcs.wm->compute_intermediate_wm(state, crtc);
+}
+
+bool intel_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (i915->display.funcs.wm->initial_watermarks) {
+ i915->display.funcs.wm->initial_watermarks(state, crtc);
+ return true;
+ }
+
+ return false;
+}
+
+void intel_atomic_update_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (i915->display.funcs.wm->atomic_update_watermarks)
+ i915->display.funcs.wm->atomic_update_watermarks(state, crtc);
+}
+
+void intel_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (i915->display.funcs.wm->optimize_watermarks)
+ i915->display.funcs.wm->optimize_watermarks(state, crtc);
+}
+
+int intel_compute_global_watermarks(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (i915->display.funcs.wm->compute_global_watermarks)
+ return i915->display.funcs.wm->compute_global_watermarks(state);
+
+ return 0;
+}
+
+void intel_wm_get_hw_state(struct drm_i915_private *i915)
+{
+ if (i915->display.funcs.wm->get_hw_state)
+ return i915->display.funcs.wm->get_hw_state(i915);
+}
+
+bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+
+ /* FIXME check the 'enable' instead */
+ if (!crtc_state->hw.active)
+ return false;
+
+ /*
+ * Treat cursor with fb as always visible since cursor updates
+ * can happen faster than the vrefresh rate, and the current
+ * watermark code doesn't handle that correctly. Cursor updates
+ * which set/clear the fb or change the cursor size are going
+ * to get throttled by intel_legacy_cursor_update() to work
+ * around this problem with the watermark code.
+ */
+ if (plane->id == PLANE_CURSOR)
+ return plane_state->hw.fb != NULL;
+ else
+ return plane_state->uapi.visible;
+}
+
+void intel_print_wm_latency(struct drm_i915_private *dev_priv,
+ const char *name, const u16 wm[])
+{
+ int level;
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ unsigned int latency = wm[level];
+
+ if (latency == 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "%s WM%d latency not provided\n",
+ name, level);
+ continue;
+ }
+
+ /*
+ * - latencies are in us on gen9.
+ * - before then, WM1+ latency values are in 0.5us units
+ */
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latency *= 10;
+ else if (level > 0)
+ latency *= 5;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "%s WM%d latency %u (%u.%u usec)\n", name, level,
+ wm[level], latency / 10, latency % 10);
+ }
+}
+
+void intel_wm_init(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 9)
+ skl_wm_init(i915);
+ else
+ i9xx_wm_init(i915);
+}
+
+static void wm_latency_show(struct seq_file *m, const u16 wm[8])
+{
+ struct drm_i915_private *dev_priv = m->private;
+ int level;
+
+ drm_modeset_lock_all(&dev_priv->drm);
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ unsigned int latency = wm[level];
+
+ /*
+ * - WM1+ latency values in 0.5us units
+ * - latencies are in us on gen9/vlv/chv
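+ * (illustrative example with an assumed value: a raw WM1 latency of 3
+ * in 0.5us units is scaled by 5 below and printed as "1.5 usec")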
+ */
+ if (DISPLAY_VER(dev_priv) >= 9 ||
+ IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) ||
+ IS_G4X(dev_priv))
+ latency *= 10;
+ else if (level > 0)
+ latency *= 5;
+
+ seq_printf(m, "WM%d %u (%u.%u usec)\n",
+ level, wm[level], latency / 10, latency % 10);
+ }
+
+ drm_modeset_unlock_all(&dev_priv->drm);
+}
+
+static int pri_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.pri_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int spr_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.spr_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int cur_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.cur_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int pri_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, pri_wm_latency_show, dev_priv);
+}
+
+static int spr_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (HAS_GMCH(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, spr_wm_latency_show, dev_priv);
+}
+
+static int cur_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (HAS_GMCH(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, cur_wm_latency_show, dev_priv);
+}
+
+static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp, u16 wm[8])
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 new[8] = { 0 };
+ int level;
+ int ret;
+ char tmp[32];
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
+ &new[0], &new[1], &new[2], &new[3],
+ &new[4], &new[5], &new[6], &new[7]);
+ if (ret != dev_priv->display.wm.num_levels)
+ return -EINVAL;
+
+ drm_modeset_lock_all(&dev_priv->drm);
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++)
+ wm[level] = new[level];
+
+ drm_modeset_unlock_all(&dev_priv->drm);
+
+ return len;
+}
+
+static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.pri_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.spr_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.cur_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static const struct file_operations i915_pri_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = pri_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = pri_wm_latency_write
+};
+
+static const struct file_operations i915_spr_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = spr_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = spr_wm_latency_write
+};
+
+static const struct file_operations i915_cur_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = cur_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = cur_wm_latency_write
+};
+
+void intel_wm_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root,
+ i915, &i915_pri_wm_latency_fops);
+
+ debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root,
+ i915, &i915_spr_wm_latency_fops);
+
+ debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root,
+ i915, &i915_cur_wm_latency_fops);
+
+ skl_watermark_debugfs_register(i915);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_wm.h b/drivers/gpu/drm/i915/display/intel_wm.h
new file mode 100644
index 000000000000..48429ac140d2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_wm.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_WM_H__
+#define __INTEL_WM_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_plane_state;
+
+void intel_update_watermarks(struct drm_i915_private *i915);
+int intel_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+int intel_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+bool intel_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_atomic_update_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+int intel_compute_global_watermarks(struct intel_atomic_state *state);
+void intel_wm_get_hw_state(struct drm_i915_private *i915);
+bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+void intel_print_wm_latency(struct drm_i915_private *i915,
+ const char *name, const u16 wm[]);
+void intel_wm_init(struct drm_i915_private *i915);
+void intel_wm_debugfs_register(struct drm_i915_private *i915);
+
+#endif /* __INTEL_WM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_pm_types.h b/drivers/gpu/drm/i915/display/intel_wm_types.h
index 93152537b420..628b7c0ce484 100644
--- a/drivers/gpu/drm/i915/intel_pm_types.h
+++ b/drivers/gpu/drm/i915/display/intel_wm_types.h
@@ -3,12 +3,12 @@
* Copyright © 2021 Intel Corporation
*/
-#ifndef __INTEL_PM_TYPES_H__
-#define __INTEL_PM_TYPES_H__
+#ifndef __INTEL_WM_TYPES_H__
+#define __INTEL_WM_TYPES_H__
#include <linux/types.h>
-#include "display/intel_display_limits.h"
+#include "intel_display_limits.h"
enum intel_ddb_partitioning {
INTEL_DDB_PART_1_2,
@@ -73,4 +73,4 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
return false;
}
-#endif /* __INTEL_PM_TYPES_H__ */
+#endif /* __INTEL_WM_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 9b172a1e90de..fd0065a46ec5 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -17,7 +17,6 @@
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_psr.h"
-#include "intel_sprite.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
@@ -642,7 +641,7 @@ icl_plane_disable_arm(struct intel_plane *plane,
skl_write_plane_wm(plane, crtc_state);
- intel_psr2_disable_plane_sel_fetch(plane, crtc_state);
+ intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state);
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
}
@@ -1260,7 +1259,7 @@ icl_plane_update_noarm(struct intel_plane *plane,
if (plane_state->force_black)
icl_plane_csc_load_black(plane);
- intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
+ intel_psr2_program_plane_sel_fetch_noarm(plane, crtc_state, plane_state, color_plane);
}
static void
@@ -1287,6 +1286,8 @@ icl_plane_update_arm(struct intel_plane *plane,
if (plane_state->scaler_id >= 0)
skl_program_plane_scaler(plane, crtc_state, plane_state);
+ intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state, plane_state);
+
/*
* The control register self-arms if the plane was previously
* disabled. Try to make the plane enable atomic by writing
@@ -2180,7 +2181,7 @@ static bool gen12_plane_has_mc_ccs(struct drm_i915_private *i915,
if (DISPLAY_VER(i915) < 12)
return false;
- /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */
+ /* Wa_14010477008 */
if (IS_DG1(i915) || IS_ROCKETLAKE(i915) ||
IS_TGL_DISPLAY_STEP(i915, STEP_A0, STEP_D0))
return false;
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index d1670cc3eff2..50a9a6adbe32 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -5,21 +5,22 @@
#include <drm/drm_blend.h>
+#include "i915_drv.h"
+#include "i915_fixed.h"
+#include "i915_reg.h"
+#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
+#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_fb.h"
-#include "skl_watermark.h"
-
-#include "i915_drv.h"
-#include "i915_fixed.h"
-#include "i915_reg.h"
#include "intel_pcode.h"
-#include "intel_pm.h"
+#include "intel_wm.h"
+#include "skl_watermark.h"
static void skl_sagv_disable(struct drm_i915_private *i915);
@@ -64,7 +65,7 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
static bool
intel_has_sagv(struct drm_i915_private *i915)
{
- return DISPLAY_VER(i915) >= 9 && !IS_LP(i915) &&
+ return HAS_SAGV(i915) &&
i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED;
}
@@ -92,7 +93,7 @@ intel_sagv_block_time(struct drm_i915_private *i915)
return val;
} else if (DISPLAY_VER(i915) == 11) {
return 10;
- } else if (DISPLAY_VER(i915) == 9 && !IS_LP(i915)) {
+ } else if (HAS_SAGV(i915)) {
return 30;
} else {
return 0;
@@ -101,7 +102,7 @@ intel_sagv_block_time(struct drm_i915_private *i915)
static void intel_sagv_init(struct drm_i915_private *i915)
{
- if (!intel_has_sagv(i915))
+ if (!HAS_SAGV(i915))
i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
/*
@@ -359,7 +360,7 @@ static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
continue;
/* Find the highest enabled wm level for this plane */
- for (level = ilk_wm_max_level(i915);
+ for (level = i915->display.wm.num_levels - 1;
!wm->wm[level].enable; --level)
{ }
@@ -704,16 +705,38 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */);
+static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level,
+ const struct skl_wm_params *wp)
+{
+ unsigned int latency = i915->display.wm.skl_latency[level];
+
+ if (latency == 0)
+ return 0;
+
+ /*
+ * WaIncreaseLatencyIPCEnabled: kbl,cfl
+ * Display WA #1141: kbl,cfl
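+ * (for illustration only, made-up numbers: a 5 us level latency on an
+ * assumed KBL system with IPC enabled becomes 5 + 4 = 9 us here)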
+ */
+ if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
+ skl_watermark_ipc_enabled(i915))
+ latency += 4;
+
+ if (skl_needs_memory_bw_wa(i915) && wp && wp->x_tiled)
+ latency += 15;
+
+ return latency;
+}
+
static unsigned int
skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
int num_active)
{
struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- int level, max_level = ilk_wm_max_level(i915);
struct skl_wm_level wm = {};
int ret, min_ddb_alloc = 0;
struct skl_wm_params wp;
+ int level;
ret = skl_compute_wm_params(crtc_state, 256,
drm_format_info(DRM_FORMAT_ARGB8888),
@@ -722,8 +745,8 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
crtc_state->pixel_rate, &wp, 0);
drm_WARN_ON(&i915->drm, ret);
- for (level = 0; level <= max_level; level++) {
- unsigned int latency = i915->display.wm.skl_latency[level];
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
+ unsigned int latency = skl_wm_latency(i915, level, &wp);
skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
if (wm.min_ddb_alloc == U16_MAX)
@@ -1407,16 +1430,22 @@ skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
}
}
-static bool icl_need_wm1_wa(struct drm_i915_private *i915,
- enum plane_id plane_id)
+static bool skl_need_wm_copy_wa(struct drm_i915_private *i915, int level,
+ const struct skl_plane_wm *wm)
{
/*
* Wa_1408961008:icl, ehl
* Wa_14012656716:tgl, adl
- * Underruns with WM1+ disabled
+ * Wa_14017887344:icl
+ * Wa_14017868169:adl, tgl
+ * Due to some power saving optimizations, other subsystems such as
+ * PSR might still use even disabled wm level registers for
+ * "reference", so let's at least keep the values sane.
+ * Considering how many workarounds require us to do similar things,
+ * it was decided to simply do this for all platforms; since those wm
+ * levels are disabled anyway, this isn't going to do any harm.
*/
- return DISPLAY_VER(i915) == 11 ||
- (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR);
+ return level > 0 && !wm->wm[level].enable;
}
struct skl_plane_ddb_iter {
@@ -1492,7 +1521,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* Find the highest watermark level for which we can satisfy the block
* requirement of active planes.
*/
- for (level = ilk_wm_max_level(i915); level >= 0; level--) {
+ for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
blocks = 0;
for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_plane_wm *wm =
@@ -1568,7 +1597,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* all levels as "enabled." Go back now and disable the ones
* that aren't actually possible.
*/
- for (level++; level <= ilk_wm_max_level(i915); level++) {
+ for (level++; level < i915->display.wm.num_levels; level++) {
for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb[plane_id];
@@ -1585,12 +1614,10 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
else
skl_check_wm_level(&wm->wm[level], ddb);
- if (icl_need_wm1_wa(i915, plane_id) &&
- level == 1 && !wm->wm[level].enable &&
- wm->wm[0].enable) {
- wm->wm[level].blocks = wm->wm[0].blocks;
- wm->wm[level].lines = wm->wm[0].lines;
- wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
+ if (skl_need_wm_copy_wa(i915, level, wm)) {
+ wm->wm[level].blocks = wm->wm[level - 1].blocks;
+ wm->wm[level].lines = wm->wm[level - 1].lines;
+ wm->wm[level].ignore_lines = wm->wm[level - 1].ignore_lines;
}
}
}
@@ -1835,17 +1862,6 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
return;
}
- /*
- * WaIncreaseLatencyIPCEnabled: kbl,cfl
- * Display WA #1141: kbl,cfl
- */
- if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
- skl_watermark_ipc_enabled(i915))
- latency += 4;
-
- if (skl_needs_memory_bw_wa(i915) && wp->x_tiled)
- latency += 15;
-
method1 = skl_wm_method1(i915, wp->plane_pixel_rate,
wp->cpp, latency, wp->dbuf_block_size);
method2 = skl_wm_method2(wp->plane_pixel_rate,
@@ -1967,12 +1983,12 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
struct skl_wm_level *levels)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- int level, max_level = ilk_wm_max_level(i915);
struct skl_wm_level *result_prev = &levels[0];
+ int level;
- for (level = 0; level <= max_level; level++) {
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
struct skl_wm_level *result = &levels[level];
- unsigned int latency = i915->display.wm.skl_latency[level];
+ unsigned int latency = skl_wm_latency(i915, level, wm_params);
skl_compute_plane_wm(crtc_state, plane, level, latency,
wm_params, result_prev, result);
@@ -1992,7 +2008,8 @@ static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
unsigned int latency = 0;
if (i915->display.sagv.block_time_us)
- latency = i915->display.sagv.block_time_us + i915->display.wm.skl_latency[0];
+ latency = i915->display.sagv.block_time_us +
+ skl_wm_latency(i915, 0, wm_params);
skl_compute_plane_wm(crtc_state, plane, 0, latency,
wm_params, &levels[0],
@@ -2184,6 +2201,119 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
return 0;
}
+static bool
+skl_is_vblank_too_short(const struct intel_crtc_state *crtc_state,
+ int wm0_lines, int latency)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ /* FIXME missing scaler and DSC pre-fill time */
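+ /*
+ * Illustrative check with assumed numbers: a framestart delay of 1
+ * line, a latency worth ~7 scanlines for this mode and a 20 line WM0
+ * add up to 28 lines, which must fit within
+ * crtc_vtotal - crtc_vblank_start for the vblank not to be too short.
+ */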
+ return crtc_state->framestart_delay +
+ intel_usecs_to_scanlines(adjusted_mode, latency) +
+ wm0_lines >
+ adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vblank_start;
+}
+
+static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum plane_id plane_id;
+ int wm0_lines = 0;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ /* FIXME what about !skl_wm_has_lines() platforms? */
+ wm0_lines = max_t(int, wm0_lines, wm->wm[0].lines);
+ }
+
+ return wm0_lines;
+}
+
+static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
+ int wm0_lines)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ int level;
+
+ for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
+ int latency;
+
+ /* FIXME should we care about the latency w/a's? */
+ latency = skl_wm_latency(i915, level, NULL);
+ if (latency == 0)
+ continue;
+
+ /* FIXME is it correct to use 0 latency for wm0 here? */
+ if (level == 0)
+ latency = 0;
+
+ if (!skl_is_vblank_too_short(crtc_state, wm0_lines, latency))
+ return level;
+ }
+
+ return -EINVAL;
+}
+
+static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ int wm0_lines, level;
+
+ if (!crtc_state->hw.active)
+ return 0;
+
+ wm0_lines = skl_max_wm0_lines(crtc_state);
+
+ level = skl_max_wm_level_for_vblank(crtc_state, wm0_lines);
+ if (level < 0)
+ return level;
+
+ /*
+ * FIXME PSR needs to toggle LATENCY_REPORTING_REMOVED_PIPE_*
+ * based on whether we're limited by the vblank duration.
+ *
+ * FIXME also related to skl+ w/a 1136 (also unimplemented as of
+ * now) perhaps?
+ */
+
+ for (level++; level < i915->display.wm.num_levels; level++) {
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ /*
+ * FIXME just clear enable or flag the entire
+ * thing as bad via min_ddb_alloc=U16_MAX?
+ */
+ wm->wm[level].enable = false;
+ wm->uv_wm[level].enable = false;
+ }
+ }
+
+ if (DISPLAY_VER(i915) >= 12 &&
+ i915->display.sagv.block_time_us &&
+ skl_is_vblank_too_short(crtc_state, wm0_lines,
+ i915->display.sagv.block_time_us)) {
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ wm->sagv.wm0.enable = false;
+ wm->sagv.trans_wm.enable = false;
+ }
+ }
+
+ return 0;
+}
+
static int skl_build_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -2213,7 +2343,7 @@ static int skl_build_pipe_wm(struct intel_atomic_state *state,
crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;
- return 0;
+ return skl_wm_check_vblank(crtc_state);
}
static void skl_ddb_entry_write(struct drm_i915_private *i915,
@@ -2248,7 +2378,6 @@ void skl_write_plane_wm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(plane->base.dev);
- int level, max_level = ilk_wm_max_level(i915);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
@@ -2256,8 +2385,9 @@ void skl_write_plane_wm(struct intel_plane *plane,
&crtc_state->wm.skl.plane_ddb[plane_id];
const struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
+ int level;
- for (level = 0; level <= max_level; level++)
+ for (level = 0; level < i915->display.wm.num_levels; level++)
skl_write_wm_level(i915, PLANE_WM(pipe, plane_id, level),
skl_plane_wm_level(pipe_wm, plane_id, level));
@@ -2285,14 +2415,14 @@ void skl_write_cursor_wm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(plane->base.dev);
- int level, max_level = ilk_wm_max_level(i915);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
const struct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb[plane_id];
+ int level;
- for (level = 0; level <= max_level; level++)
+ for (level = 0; level < i915->display.wm.num_levels; level++)
skl_write_wm_level(i915, CUR_WM(pipe, level),
skl_plane_wm_level(pipe_wm, plane_id, level));
@@ -2324,9 +2454,9 @@ static bool skl_plane_wm_equals(struct drm_i915_private *i915,
const struct skl_plane_wm *wm1,
const struct skl_plane_wm *wm2)
{
- int level, max_level = ilk_wm_max_level(i915);
+ int level;
- for (level = 0; level <= max_level; level++) {
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
/*
* We don't check uv_wm as the hardware doesn't actually
* use it. It only gets used for calculating the required
@@ -2398,6 +2528,8 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
return PTR_ERR(plane_state);
new_crtc_state->update_planes |= BIT(plane_id);
+ new_crtc_state->async_flip_planes = 0;
+ new_crtc_state->do_async_flip = false;
}
return 0;
@@ -2674,9 +2806,9 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
const struct skl_pipe_wm *new_pipe_wm)
{
struct drm_i915_private *i915 = to_i915(plane->base.dev);
- int level, max_level = ilk_wm_max_level(i915);
+ int level;
- for (level = 0; level <= max_level; level++) {
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
/*
* We don't check uv_wm as the hardware doesn't actually
* use it. It only gets used for calculating the required
@@ -2755,6 +2887,8 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
return PTR_ERR(plane_state);
new_crtc_state->update_planes |= BIT(plane_id);
+ new_crtc_state->async_flip_planes = 0;
+ new_crtc_state->do_async_flip = false;
}
return 0;
@@ -2810,16 +2944,14 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- int level, max_level;
enum plane_id plane_id;
+ int level;
u32 val;
- max_level = ilk_wm_max_level(i915);
-
for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm = &out->planes[plane_id];
- for (level = 0; level <= max_level; level++) {
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
if (plane_id != PLANE_CURSOR)
val = intel_de_read(i915, PLANE_WM(pipe, plane_id, level));
else
@@ -2856,7 +2988,7 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
}
}
-void skl_wm_get_hw_state(struct drm_i915_private *i915)
+static void skl_wm_get_hw_state(struct drm_i915_private *i915)
{
struct intel_dbuf_state *dbuf_state =
to_intel_dbuf_state(i915->display.dbuf.obj.state);
@@ -2956,7 +3088,7 @@ static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
return false;
}
-void skl_wm_sanitize(struct drm_i915_private *i915)
+static void skl_wm_sanitize(struct drm_i915_private *i915)
{
struct intel_crtc *crtc;
@@ -2992,6 +3124,12 @@ void skl_wm_sanitize(struct drm_i915_private *i915)
}
}
+static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
+{
+ skl_wm_get_hw_state(i915);
+ skl_wm_sanitize(i915);
+}
+
void intel_wm_state_verify(struct intel_crtc *crtc,
struct intel_crtc_state *new_crtc_state)
{
@@ -3002,9 +3140,9 @@ void intel_wm_state_verify(struct intel_crtc *crtc,
struct skl_pipe_wm wm;
} *hw;
const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
- int level, max_level = ilk_wm_max_level(i915);
struct intel_plane *plane;
u8 hw_enabled_slices;
+ int level;
if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
return;
@@ -3031,7 +3169,7 @@ void intel_wm_state_verify(struct intel_crtc *crtc,
const struct skl_wm_level *hw_wm_level, *sw_wm_level;
/* Watermarks */
- for (level = 0; level <= max_level; level++) {
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
hw_wm_level = &hw->wm.planes[plane->id].wm[level];
sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
@@ -3153,7 +3291,7 @@ void skl_watermark_ipc_init(struct drm_i915_private *i915)
static void
adjust_wm_latency(struct drm_i915_private *i915,
- u16 wm[], int max_level, int read_latency)
+ u16 wm[], int num_levels, int read_latency)
{
bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed;
int i, level;
@@ -3163,12 +3301,12 @@ adjust_wm_latency(struct drm_i915_private *i915,
* need to be disabled. We make sure to sanitize the values out
* of the punit to satisfy this requirement.
*/
- for (level = 1; level <= max_level; level++) {
+ for (level = 1; level < num_levels; level++) {
if (wm[level] == 0) {
- for (i = level + 1; i <= max_level; i++)
+ for (i = level + 1; i < num_levels; i++)
wm[i] = 0;
- max_level = level - 1;
+ num_levels = level;
break;
}
}
@@ -3181,7 +3319,7 @@ adjust_wm_latency(struct drm_i915_private *i915,
* from the punit when level 0 response data is 0us.
*/
if (wm[0] == 0) {
- for (level = 0; level <= max_level; level++)
+ for (level = 0; level < num_levels; level++)
wm[level] += read_latency;
}
@@ -3197,7 +3335,7 @@ adjust_wm_latency(struct drm_i915_private *i915,
static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
- int max_level = ilk_wm_max_level(i915);
+ int num_levels = i915->display.wm.num_levels;
u32 val;
val = intel_de_read(i915, MTL_LATENCY_LP0_LP1);
@@ -3212,12 +3350,12 @@ static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- adjust_wm_latency(i915, wm, max_level, 6);
+ adjust_wm_latency(i915, wm, num_levels, 6);
}
static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
- int max_level = ilk_wm_max_level(i915);
+ int num_levels = i915->display.wm.num_levels;
int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2;
int mult = IS_DG2(i915) ? 2 : 1;
u32 val;
@@ -3249,11 +3387,16 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
- adjust_wm_latency(i915, wm, max_level, read_latency);
+ adjust_wm_latency(i915, wm, num_levels, read_latency);
}
static void skl_setup_wm_latency(struct drm_i915_private *i915)
{
+ if (HAS_HW_SAGV_WM(i915))
+ i915->display.wm.num_levels = 6;
+ else
+ i915->display.wm.num_levels = 8;
+
if (DISPLAY_VER(i915) >= 14)
mtl_read_wm_latency(i915, i915->display.wm.skl_latency);
else
@@ -3264,6 +3407,7 @@ static void skl_setup_wm_latency(struct drm_i915_private *i915)
static const struct intel_wm_funcs skl_wm_funcs = {
.compute_global_watermarks = skl_compute_wm,
+ .get_hw_state = skl_wm_get_hw_state_and_sanitize,
};
void skl_wm_init(struct drm_i915_private *i915)
@@ -3541,13 +3685,34 @@ static const struct file_operations skl_watermark_ipc_status_fops = {
.write = skl_watermark_ipc_status_write
};
-void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915)
+static int intel_sagv_status_show(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = m->private;
+ static const char * const sagv_status[] = {
+ [I915_SAGV_UNKNOWN] = "unknown",
+ [I915_SAGV_DISABLED] = "disabled",
+ [I915_SAGV_ENABLED] = "enabled",
+ [I915_SAGV_NOT_CONTROLLED] = "not controlled",
+ };
+
+ seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915)));
+ seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]);
+ seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(intel_sagv_status);
+
+void skl_watermark_debugfs_register(struct drm_i915_private *i915)
{
struct drm_minor *minor = i915->drm.primary;
- if (!HAS_IPC(i915))
- return;
+ if (HAS_IPC(i915))
+ debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
+ &skl_watermark_ipc_status_fops);
- debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
- &skl_watermark_ipc_status_fops);
+ if (HAS_SAGV(i915))
+ debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, i915,
+ &intel_sagv_status_fops);
}
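The new i915_sagv_status file above follows the stock debugfs pattern: DEFINE_SHOW_ATTRIBUTE() turns a *_show(struct seq_file *, void *) callback into a matching *_fops via single_open(), and debugfs_create_file() wires it up with the private data pointer that later appears as m->private. A minimal self-contained sketch, with the "foo" names invented for illustration:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

struct foo_device {
	struct dentry *debugfs_root;
	int status;
};

static int foo_status_show(struct seq_file *m, void *unused)
{
	struct foo_device *foo = m->private;	/* data pointer passed to debugfs_create_file() */

	seq_printf(m, "status: %d\n", foo->status);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo_status);	/* emits foo_status_fops */

static void foo_debugfs_register(struct foo_device *foo)
{
	/* 0444: read-only, mirroring i915_sagv_status above */
	debugfs_create_file("foo_status", 0444, foo->debugfs_root, foo,
			    &foo_status_fops);
}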
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index 37954c472070..f91a3d4ddc07 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -10,7 +10,7 @@
#include "intel_display_limits.h"
#include "intel_global_state.h"
-#include "intel_pm_types.h"
+#include "intel_wm_types.h"
struct drm_i915_private;
struct intel_atomic_state;
@@ -38,16 +38,13 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
const struct skl_ddb_entry *entries,
int num_entries, int ignore_idx);
-void skl_wm_get_hw_state(struct drm_i915_private *i915);
-void skl_wm_sanitize(struct drm_i915_private *i915);
-
void intel_wm_state_verify(struct intel_crtc *crtc,
struct intel_crtc_state *new_crtc_state);
void skl_watermark_ipc_init(struct drm_i915_private *i915);
void skl_watermark_ipc_update(struct drm_i915_private *i915);
bool skl_watermark_ipc_enabled(struct drm_i915_private *i915);
-void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915);
+void skl_watermark_debugfs_register(struct drm_i915_private *i915);
void skl_wm_init(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 2289f6b1b4eb..028965ab442d 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -331,32 +331,23 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 tmp;
bool cold_boot = false;
/* Set the MIPI mode
* If MIPI_Mode is off, then writing to LP_Wake bit is not reflecting.
* Power ON MIPI IO first and then write into IO reset and LP wake bits
*/
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
- intel_de_write(dev_priv, MIPI_CTRL(port),
- tmp | GLK_MIPIIO_ENABLE);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, MIPI_CTRL(port), 0, GLK_MIPIIO_ENABLE);
/* Put the IO into reset */
- tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
- tmp &= ~GLK_MIPIIO_RESET_RELEASED;
- intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp);
+ intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0);
/* Program LP Wake */
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
- if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY))
- tmp &= ~GLK_LP_WAKE;
- else
- tmp |= GLK_LP_WAKE;
- intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
+ u32 tmp = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
+ intel_de_rmw(dev_priv, MIPI_CTRL(port),
+ GLK_LP_WAKE, (tmp & DEVICE_READY) ? GLK_LP_WAKE : 0);
}
/* Wait for Pwr ACK */
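The conversions throughout this file assume the usual semantics of intel_de_rmw(i915, reg, clear, set): read the register, drop the 'clear' bits, OR in the 'set' bits, and write back only if the value changed. A hedged sketch of that behaviour (illustrative, not the driver's actual implementation):

static u32 de_rmw_sketch(struct drm_i915_private *i915, i915_reg_t reg,
			 u32 clear, u32 set)
{
	u32 old, val;

	old = intel_de_read(i915, reg);
	val = (old & ~clear) | set;	/* clear first, then set */
	if (val != old)
		intel_de_write(i915, reg, val);

	return old;
}

So intel_de_rmw(dev_priv, MIPI_CTRL(port), 0, GLK_MIPIIO_ENABLE) sets a bit without clearing anything, while intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0) clears one.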
@@ -380,7 +371,6 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
/* Wait for MIPI PHY status bit to set */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -390,24 +380,18 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
}
/* Get IO out of reset */
- val = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
- intel_de_write(dev_priv, MIPI_CTRL(PORT_A),
- val | GLK_MIPIIO_RESET_RELEASED);
+ intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), 0, GLK_MIPIIO_RESET_RELEASED);
	/* Get IO out of Low power state */

for_each_dsi_port(port, intel_dsi->ports) {
if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) {
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- val &= ~ULPS_STATE_MASK;
- val |= DEVICE_READY;
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_MASK, DEVICE_READY);
usleep_range(10, 15);
} else {
/* Enter ULPS */
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- val &= ~ULPS_STATE_MASK;
- val |= (ULPS_STATE_ENTER | DEVICE_READY);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY);
/* Wait for ULPS active */
if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
@@ -415,20 +399,15 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
drm_err(&dev_priv->drm, "ULPS not active\n");
/* Exit ULPS */
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- val &= ~ULPS_STATE_MASK;
- val |= (ULPS_STATE_EXIT | DEVICE_READY);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_MASK, ULPS_STATE_EXIT | DEVICE_READY);
/* Enter Normal Mode */
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- val &= ~ULPS_STATE_MASK;
- val |= (ULPS_STATE_NORMAL_OPERATION | DEVICE_READY);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
-
- val = intel_de_read(dev_priv, MIPI_CTRL(port));
- val &= ~GLK_LP_WAKE;
- intel_de_write(dev_priv, MIPI_CTRL(port), val);
+ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_MASK,
+ ULPS_STATE_NORMAL_OPERATION | DEVICE_READY);
+
+ intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_LP_WAKE, 0);
}
}
@@ -460,9 +439,7 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
/* Enable MIPI PHY transparent latch */
for_each_dsi_port(port, intel_dsi->ports) {
- val = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
- intel_de_write(dev_priv, BXT_MIPI_PORT_CTRL(port),
- val | LP_OUTPUT_HOLD);
+ intel_de_rmw(dev_priv, BXT_MIPI_PORT_CTRL(port), 0, LP_OUTPUT_HOLD);
usleep_range(2000, 2500);
}
@@ -482,7 +459,6 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
drm_dbg_kms(&dev_priv->drm, "\n");
@@ -505,9 +481,7 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
* Common bit for both MIPI Port A & MIPI Port C
* No similar bit in MIPI Port C reg
*/
- val = intel_de_read(dev_priv, MIPI_PORT_CTRL(PORT_A));
- intel_de_write(dev_priv, MIPI_PORT_CTRL(PORT_A),
- val | LP_OUTPUT_HOLD);
+ intel_de_rmw(dev_priv, MIPI_PORT_CTRL(PORT_A), 0, LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
@@ -537,15 +511,11 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
/* Enter ULPS */
- for_each_dsi_port(port, intel_dsi->ports) {
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- val &= ~ULPS_STATE_MASK;
- val |= (ULPS_STATE_ENTER | DEVICE_READY);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY);
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -568,12 +538,9 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 tmp;
/* Put the IO into reset */
- tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
- tmp &= ~GLK_MIPIIO_RESET_RELEASED;
- intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp);
+ intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0);
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -583,11 +550,8 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
}
/* Clear MIPI mode */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
- tmp &= ~GLK_MIPIIO_ENABLE;
- intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_MIPIIO_ENABLE, 0);
}
static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
@@ -607,7 +571,6 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
- u32 val;
intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
DEVICE_READY | ULPS_STATE_ENTER);
@@ -631,8 +594,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
drm_err(&dev_priv->drm, "DSI LP not going Low\n");
/* Disable MIPI PHY transparent latch */
- val = intel_de_read(dev_priv, port_ctrl);
- intel_de_write(dev_priv, port_ctrl, val & ~LP_OUTPUT_HOLD);
+ intel_de_rmw(dev_priv, port_ctrl, LP_OUTPUT_HOLD, 0);
usleep_range(1000, 1500);
intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x00);
@@ -649,23 +611,17 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
enum port port;
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
- u32 temp;
+ u32 temp = intel_dsi->pixel_overlap;
+
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
- for_each_dsi_port(port, intel_dsi->ports) {
- temp = intel_de_read(dev_priv,
- MIPI_CTRL(port));
- temp &= ~BXT_PIXEL_OVERLAP_CNT_MASK |
- intel_dsi->pixel_overlap <<
- BXT_PIXEL_OVERLAP_CNT_SHIFT;
- intel_de_write(dev_priv, MIPI_CTRL(port),
- temp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, MIPI_CTRL(port),
+ BXT_PIXEL_OVERLAP_CNT_MASK,
+ temp << BXT_PIXEL_OVERLAP_CNT_SHIFT);
} else {
- temp = intel_de_read(dev_priv, VLV_CHICKEN_3);
- temp &= ~PIXEL_OVERLAP_CNT_MASK |
- intel_dsi->pixel_overlap <<
- PIXEL_OVERLAP_CNT_SHIFT;
- intel_de_write(dev_priv, VLV_CHICKEN_3, temp);
+ intel_de_rmw(dev_priv, VLV_CHICKEN_3,
+ PIXEL_OVERLAP_CNT_MASK,
+ temp << PIXEL_OVERLAP_CNT_SHIFT);
}
}
@@ -709,11 +665,9 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
- u32 temp;
/* de-assert ip_tg_enable signal */
- temp = intel_de_read(dev_priv, port_ctrl);
- intel_de_write(dev_priv, port_ctrl, temp & ~DPI_ENABLE);
+ intel_de_rmw(dev_priv, port_ctrl, DPI_ENABLE, 0);
intel_de_posting_read(dev_priv, port_ctrl);
}
}
@@ -787,7 +741,6 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum port port;
- u32 val;
bool glk_cold_boot = false;
drm_dbg_kms(&dev_priv->drm, "\n");
@@ -810,9 +763,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
if (IS_BROXTON(dev_priv)) {
/* Add MIPI IO reset programming for modeset */
- val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
- intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON,
- val | MIPIO_RST_CTRL);
+ intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL);
/* Power up DSI regulator */
intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
@@ -820,12 +771,9 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- u32 val;
-
/* Disable DPOunit clock gating, can stall pipe */
- val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
- val |= DPOUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
+ intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
+ 0, DPOUNIT_CLOCK_GATE_DISABLE);
}
if (!IS_GEMINILAKE(dev_priv))
@@ -949,7 +897,6 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
drm_dbg_kms(&dev_priv->drm, "\n");
@@ -987,21 +934,16 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
HS_IO_CTRL_SELECT);
/* Add MIPI IO reset programming for modeset */
- val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
- intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON,
- val & ~MIPIO_RST_CTRL);
+ intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0);
}
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
bxt_dsi_pll_disable(encoder);
} else {
- u32 val;
-
vlv_dsi_pll_disable(encoder);
- val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
- val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
+ intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
+ DPOUNIT_CLOCK_GATE_DISABLE, 0);
}
/* Assert reset */
@@ -1058,7 +1000,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
*/
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
port == PORT_C)
- enabled = intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+ enabled = intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE;
/* Try command mode if video mode not enabled */
if (!enabled) {
@@ -1130,7 +1072,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
bpp = mipi_dsi_pixel_format_to_bpp(
pixel_format_from_register_bits(fmt));
- pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
+ pipe_config->pipe_bpp = bdw_get_pipe_misc_bpp(crtc);
	/* Enable Frame time stamp based scanline reporting */
pipe_config->mode_flags |=
@@ -1432,11 +1374,8 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
enum pipe pipe = crtc->pipe;
- tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
- tmp &= ~BXT_PIPE_SELECT_MASK;
-
- tmp |= BXT_PIPE_SELECT(pipe);
- intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
+ intel_de_rmw(dev_priv, MIPI_CTRL(port),
+ BXT_PIPE_SELECT_MASK, BXT_PIPE_SELECT(pipe));
}
/* XXX: why here, why like this? handling in irq handler?! */
@@ -1605,7 +1544,6 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
if (IS_GEMINILAKE(dev_priv))
return;
@@ -1620,9 +1558,7 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
vlv_dsi_reset_clocks(encoder, port);
intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
- val = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port));
- val &= ~VID_MODE_FORMAT_MASK;
- intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val);
+ intel_de_rmw(dev_priv, MIPI_DSI_FUNC_PRG(port), VID_MODE_FORMAT_MASK, 0);
intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x1);
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index af7402127cd9..b697badbbe71 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -302,13 +302,10 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
void bxt_dsi_pll_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val;
drm_dbg_kms(&dev_priv->drm, "\n");
- val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
- val &= ~BXT_DSI_PLL_DO_ENABLE;
- intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val);
+ intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0);
/*
* PLL lock should deassert within 200us.
@@ -542,7 +539,6 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
drm_dbg_kms(&dev_priv->drm, "\n");
@@ -559,9 +555,7 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
}
/* Enable DSI PLL */
- val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
- val |= BXT_DSI_PLL_DO_ENABLE;
- intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val);
+ intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE);
/* Timeout and fail if PLL not locked */
if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE,
@@ -589,13 +583,9 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
} else {
- tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV1);
- tmp &= ~GLK_TX_ESC_CLK_DIV1_MASK;
- intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1, tmp);
+ intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV1, GLK_TX_ESC_CLK_DIV1_MASK, 0);
- tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV2);
- tmp &= ~GLK_TX_ESC_CLK_DIV2_MASK;
- intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2, tmp);
+ intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV2, GLK_TX_ESC_CLK_DIV2_MASK, 0);
}
intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 90a967374b1a..d8e06e783e30 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -909,7 +909,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE) & GEN12_BDSM_MASK;
if (WARN_ON(lmem_size < dsm_base))
return ERR_PTR(-ENODEV);
- dsm_size = lmem_size - dsm_base;
+ dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M);
}
io_size = dsm_size;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 3bb1f7f0110e..ff81af4c8202 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -108,31 +108,30 @@ struct tiled_blits {
u32 height;
};
-static bool supports_x_tiling(const struct drm_i915_private *i915)
+static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915)
{
int gen = GRAPHICS_VER(i915);
+ /* XY_FAST_COPY_BLT does not exist on pre-gen9 platforms */
+ drm_WARN_ON(&i915->drm, gen < 9);
+
if (gen < 12)
return true;
- if (!HAS_LMEM(i915) || IS_DG1(i915))
+ if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
return false;
- return true;
+ return HAS_DISPLAY(i915);
}
static bool fast_blit_ok(const struct blit_buffer *buf)
{
- int gen = GRAPHICS_VER(buf->vma->vm->i915);
-
- if (gen < 9)
+ /* XY_FAST_COPY_BLT does not exist on pre-gen9 platforms */
+ if (GRAPHICS_VER(buf->vma->vm->i915) < 9)
return false;
- if (gen < 12)
- return true;
-
/* filter out platforms with unsupported X-tile support in fastblit */
- if (buf->tiling == CLIENT_TILING_X && !supports_x_tiling(buf->vma->vm->i915))
+ if (buf->tiling == CLIENT_TILING_X && !fastblit_supports_x_tiling(buf->vma->vm->i915))
return false;
return true;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index d4e29da74612..ad3413242100 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -9,6 +9,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_internal.h"
+#include "gt/intel_gt_print.h"
#include "gt/intel_gt_regs.h"
#include "i915_cmd_parser.h"
@@ -1143,12 +1144,130 @@ err_put:
return ret;
}
+static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine)
+{
+ static const union intel_engine_tlb_inv_reg gen8_regs[] = {
+ [RENDER_CLASS].reg = GEN8_RTCR,
+ [VIDEO_DECODE_CLASS].reg = GEN8_M1TCR, /* , GEN8_M2TCR */
+ [VIDEO_ENHANCEMENT_CLASS].reg = GEN8_VTCR,
+ [COPY_ENGINE_CLASS].reg = GEN8_BTCR,
+ };
+ static const union intel_engine_tlb_inv_reg gen12_regs[] = {
+ [RENDER_CLASS].reg = GEN12_GFX_TLB_INV_CR,
+ [VIDEO_DECODE_CLASS].reg = GEN12_VD_TLB_INV_CR,
+ [VIDEO_ENHANCEMENT_CLASS].reg = GEN12_VE_TLB_INV_CR,
+ [COPY_ENGINE_CLASS].reg = GEN12_BLT_TLB_INV_CR,
+ [COMPUTE_CLASS].reg = GEN12_COMPCTX_TLB_INV_CR,
+ };
+ static const union intel_engine_tlb_inv_reg xehp_regs[] = {
+ [RENDER_CLASS].mcr_reg = XEHP_GFX_TLB_INV_CR,
+ [VIDEO_DECODE_CLASS].mcr_reg = XEHP_VD_TLB_INV_CR,
+ [VIDEO_ENHANCEMENT_CLASS].mcr_reg = XEHP_VE_TLB_INV_CR,
+ [COPY_ENGINE_CLASS].mcr_reg = XEHP_BLT_TLB_INV_CR,
+ [COMPUTE_CLASS].mcr_reg = XEHP_COMPCTX_TLB_INV_CR,
+ };
+ static const union intel_engine_tlb_inv_reg xelpmp_regs[] = {
+ [VIDEO_DECODE_CLASS].reg = GEN12_VD_TLB_INV_CR,
+ [VIDEO_ENHANCEMENT_CLASS].reg = GEN12_VE_TLB_INV_CR,
+ [OTHER_CLASS].reg = XELPMP_GSC_TLB_INV_CR,
+ };
+ struct drm_i915_private *i915 = engine->i915;
+ const unsigned int instance = engine->instance;
+ const unsigned int class = engine->class;
+ const union intel_engine_tlb_inv_reg *regs;
+ union intel_engine_tlb_inv_reg reg;
+ unsigned int num = 0;
+ u32 val;
+
+ /*
+ * New platforms should not be added with catch-all-newer (>=)
+ * condition so that any later platform added triggers the below warning
+ * and in turn mandates a human cross-check of whether the invalidation
+ * flows have compatible semantics.
+ *
+ * For instance with the 11.00 -> 12.00 transition three out of five
+ * respective engine registers were moved to masked type. Then after the
+ * 12.00 -> 12.50 transition multi cast handling is required too.
+ */
+
+ if (engine->gt->type == GT_MEDIA) {
+ if (MEDIA_VER_FULL(i915) == IP_VER(13, 0)) {
+ regs = xelpmp_regs;
+ num = ARRAY_SIZE(xelpmp_regs);
+ }
+ } else {
+ if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) ||
+ GRAPHICS_VER_FULL(i915) == IP_VER(12, 70) ||
+ GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) ||
+ GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) {
+ regs = xehp_regs;
+ num = ARRAY_SIZE(xehp_regs);
+ } else if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 0) ||
+ GRAPHICS_VER_FULL(i915) == IP_VER(12, 10)) {
+ regs = gen12_regs;
+ num = ARRAY_SIZE(gen12_regs);
+ } else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
+ regs = gen8_regs;
+ num = ARRAY_SIZE(gen8_regs);
+ } else if (GRAPHICS_VER(i915) < 8) {
+ return 0;
+ }
+ }
+
+ if (gt_WARN_ONCE(engine->gt, !num,
+ "Platform does not implement TLB invalidation!"))
+ return -ENODEV;
+
+ if (gt_WARN_ON_ONCE(engine->gt,
+ class >= num ||
+ (!regs[class].reg.reg &&
+ !regs[class].mcr_reg.reg)))
+ return -ERANGE;
+
+ reg = regs[class];
+
+ if (regs == xelpmp_regs && class == OTHER_CLASS) {
+ /*
+ * There's only a single GSC instance, but it uses register bit
+ * 1 instead of either 0 or OTHER_GSC_INSTANCE.
+ */
+ GEM_WARN_ON(instance != OTHER_GSC_INSTANCE);
+ val = 1;
+ } else if (regs == gen8_regs && class == VIDEO_DECODE_CLASS && instance == 1) {
+ reg.reg = GEN8_M2TCR;
+ val = 0;
+ } else {
+ val = instance;
+ }
+
+ val = BIT(val);
+
+ engine->tlb_inv.mcr = regs == xehp_regs;
+ engine->tlb_inv.reg = reg;
+ engine->tlb_inv.done = val;
+
+ if (GRAPHICS_VER(i915) >= 12 &&
+ (engine->class == VIDEO_DECODE_CLASS ||
+ engine->class == VIDEO_ENHANCEMENT_CLASS ||
+ engine->class == COMPUTE_CLASS ||
+ engine->class == OTHER_CLASS))
+ engine->tlb_inv.request = _MASKED_BIT_ENABLE(val);
+ else
+ engine->tlb_inv.request = val;
+
+ return 0;
+}
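A hedged aside on the request encoding above: for the classes that use masked registers, the upper 16 bits act as a per-bit write enable for the lower 16 bits, which is what _MASKED_BIT_ENABLE() provides. Conceptually (sketch macro, not the real definition):

/* Assuming the standard masked-register convention. */
#define SKETCH_MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))

/* e.g. instance 2 of a masked class: BIT(2) -> 0x00040004 */

The completion poll then waits for engine->tlb_inv.done, the plain BIT(val), to clear in the same register.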
+
static int engine_setup_common(struct intel_engine_cs *engine)
{
int err;
init_llist_head(&engine->barrier_tasks);
+ err = intel_engine_init_tlb_invalidation(engine);
+ if (err)
+ return err;
+
err = init_status_page(engine);
if (err)
return err;
@@ -1939,13 +2058,13 @@ static const char *repr_timer(const struct timer_list *t)
static void intel_engine_print_registers(struct intel_engine_cs *engine,
struct drm_printer *m)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_private *i915 = engine->i915;
struct intel_engine_execlists * const execlists = &engine->execlists;
u64 addr;
- if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(dev_priv, 4, 7))
+ if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(i915, 4, 7))
drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
- if (HAS_EXECLISTS(dev_priv)) {
+ if (HAS_EXECLISTS(i915)) {
drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
@@ -1966,7 +2085,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
}
- if (GRAPHICS_VER(dev_priv) >= 6) {
+ if (GRAPHICS_VER(i915) >= 6) {
drm_printf(m, "\tRING_IMR: 0x%08x\n",
ENGINE_READ(engine, RING_IMR));
drm_printf(m, "\tRING_ESR: 0x%08x\n",
@@ -1983,15 +2102,15 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
addr = intel_engine_get_last_batch_head(engine);
drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
- if (GRAPHICS_VER(dev_priv) >= 8)
+ if (GRAPHICS_VER(i915) >= 8)
addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
- else if (GRAPHICS_VER(dev_priv) >= 4)
+ else if (GRAPHICS_VER(i915) >= 4)
addr = ENGINE_READ(engine, RING_DMA_FADD);
else
addr = ENGINE_READ(engine, DMA_FADD_I8XX);
drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
- if (GRAPHICS_VER(dev_priv) >= 4) {
+ if (GRAPHICS_VER(i915) >= 4) {
drm_printf(m, "\tIPEIR: 0x%08x\n",
ENGINE_READ(engine, RING_IPEIR));
drm_printf(m, "\tIPEHR: 0x%08x\n",
@@ -2001,7 +2120,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
}
- if (HAS_EXECLISTS(dev_priv) && !intel_engine_uses_guc(engine)) {
+ if (HAS_EXECLISTS(i915) && !intel_engine_uses_guc(engine)) {
struct i915_request * const *port, *rq;
const u32 *hws =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
@@ -2067,7 +2186,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
}
rcu_read_unlock();
i915_sched_engine_active_unlock_bh(engine->sched_engine);
- } else if (GRAPHICS_VER(dev_priv) > 6) {
+ } else if (GRAPHICS_VER(i915) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
ENGINE_READ(engine, RING_PP_DIR_BASE));
drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 4fd54fb8810f..0a071e5da1a8 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -341,6 +341,18 @@ struct intel_engine_guc_stats {
u64 start_gt_clk;
};
+union intel_engine_tlb_inv_reg {
+ i915_reg_t reg;
+ i915_mcr_reg_t mcr_reg;
+};
+
+struct intel_engine_tlb_inv {
+ bool mcr;
+ union intel_engine_tlb_inv_reg reg;
+ u32 request;
+ u32 done;
+};
+
struct intel_engine_cs {
struct drm_i915_private *i915;
struct intel_gt *gt;
@@ -372,6 +384,8 @@ struct intel_engine_cs {
u32 context_size;
u32 mmio_base;
+ struct intel_engine_tlb_inv tlb_inv;
+
/*
* Some w/a require forcewake to be held (which prevents RC6) while
* a particular engine is active. If so, we set fw_domain to which
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 2af1ae3831df..5d143e2a8db0 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -394,6 +394,7 @@
#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
+#define MI_DO_COMPARE REG_BIT(21)
#define STATE_BASE_ADDRESS \
((0x3 << 29) | (0x0 << 27) | (0x1 << 24) | (0x1 << 16))
@@ -439,6 +440,8 @@
#define GSC_FW_LOAD GSC_INSTR(1, 0, 2)
#define HECI1_FW_LIMIT_VALID (1 << 31)
+#define GSC_HECI_CMD_PKT GSC_INSTR(0, 0, 6)
+
/*
* Used to convert any address to canonical form.
* Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.h b/drivers/gpu/drm/i915/gt/intel_gsc.h
index fcac1775e9c3..7ab3ca0f9f26 100644
--- a/drivers/gpu/drm/i915/gt/intel_gsc.h
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.h
@@ -33,7 +33,7 @@ struct intel_gsc {
} intf[INTEL_GSC_NUM_INTERFACES];
};
-void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *dev_priv);
+void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915);
void intel_gsc_fini(struct intel_gsc *gsc);
void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index f0dbfc434e07..7a008e829d4d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -28,7 +28,6 @@
#include "intel_migrate.h"
#include "intel_mocs.h"
#include "intel_pci_config.h"
-#include "intel_pm.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
@@ -737,12 +736,12 @@ int intel_gt_init(struct intel_gt *gt)
if (err)
goto err_gt;
- intel_uc_init_late(&gt->uc);
-
err = i915_inject_probe_error(gt->i915, -EIO);
if (err)
goto err_gt;
+ intel_uc_init_late(&gt->uc);
+
intel_migrate_init(&gt->migrate, gt);
goto out_fw;
@@ -785,6 +784,29 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
intel_gsc_fini(&gt->gsc);
/*
+ * If we unload the driver and wedge before the GSC worker is complete,
+ * the worker will hit an error on its submission to the GSC engine and
+ * then exit. This is hard to hit for a user, but it is reproducible
+ * with skipping selftests. The error is handled gracefully by the
+ * worker, so there are no functional issues, but we still end up with
+ * an error message in dmesg, which is something we want to avoid as
+ * this is a supported scenario. We could modify the worker to better
+ * handle a wedging occurring during its execution, but that gets
+ * complicated for a couple of reasons:
+ * - We do want the error on runtime wedging, because there are
+ * implications for subsystems outside of GT (i.e., PXP, HDCP), it's
+ * only the error on driver unload that we want to silence.
+ * - The worker is responsible for multiple submissions (GSC FW load,
+ * HuC auth, SW proxy), so all of those will have to be adapted to
+ * handle the wedged_on_fini scenario.
+ * Therefore, it's much simpler to just wait for the worker to be done
+ * before wedging on driver removal, also considering that the worker
+ * will likely already be idle in the great majority of non-selftest
+ * scenarios.
+ */
+ intel_gsc_uc_flush_work(&gt->uc.gsc);
+
+ /*
* Upon unregistering the device to prevent any new users, cancel
* all in-flight requests so that we can quickly unbind the active
* resources.
@@ -982,35 +1004,6 @@ void intel_gt_info_print(const struct intel_gt_info *info,
intel_sseu_dump(&info->sseu, p);
}
-struct reg_and_bit {
- union {
- i915_reg_t reg;
- i915_mcr_reg_t mcr_reg;
- };
- u32 bit;
-};
-
-static struct reg_and_bit
-get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
- const i915_reg_t *regs, const unsigned int num)
-{
- const unsigned int class = engine->class;
- struct reg_and_bit rb = { };
-
- if (gt_WARN_ON_ONCE(engine->gt, class >= num || !regs[class].reg))
- return rb;
-
- rb.reg = regs[class];
- if (gen8 && class == VIDEO_DECODE_CLASS)
- rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
- else
- rb.bit = engine->instance;
-
- rb.bit = BIT(rb.bit);
-
- return rb;
-}
-
/*
 * HW architecture suggests typical invalidation time at 40us,
* with pessimistic cases up to 100us and a recommendation to
@@ -1024,14 +1017,20 @@ get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
* but are now considered MCR registers. Since they exist within a GAM range,
* the primary instance of the register rolls up the status from each unit.
*/
-static int wait_for_invalidate(struct intel_gt *gt, struct reg_and_bit rb)
+static int wait_for_invalidate(struct intel_engine_cs *engine)
{
- if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
- return intel_gt_mcr_wait_for_reg(gt, rb.mcr_reg, rb.bit, 0,
+ if (engine->tlb_inv.mcr)
+ return intel_gt_mcr_wait_for_reg(engine->gt,
+ engine->tlb_inv.reg.mcr_reg,
+ engine->tlb_inv.done,
+ 0,
TLB_INVAL_TIMEOUT_US,
TLB_INVAL_TIMEOUT_MS);
else
- return __intel_wait_for_register_fw(gt->uncore, rb.reg, rb.bit, 0,
+ return __intel_wait_for_register_fw(engine->gt->uncore,
+ engine->tlb_inv.reg.reg,
+ engine->tlb_inv.done,
+ 0,
TLB_INVAL_TIMEOUT_US,
TLB_INVAL_TIMEOUT_MS,
NULL);
@@ -1039,62 +1038,14 @@ static int wait_for_invalidate(struct intel_gt *gt, struct reg_and_bit rb)
static void mmio_invalidate_full(struct intel_gt *gt)
{
- static const i915_reg_t gen8_regs[] = {
- [RENDER_CLASS] = GEN8_RTCR,
- [VIDEO_DECODE_CLASS] = GEN8_M1TCR, /* , GEN8_M2TCR */
- [VIDEO_ENHANCEMENT_CLASS] = GEN8_VTCR,
- [COPY_ENGINE_CLASS] = GEN8_BTCR,
- };
- static const i915_reg_t gen12_regs[] = {
- [RENDER_CLASS] = GEN12_GFX_TLB_INV_CR,
- [VIDEO_DECODE_CLASS] = GEN12_VD_TLB_INV_CR,
- [VIDEO_ENHANCEMENT_CLASS] = GEN12_VE_TLB_INV_CR,
- [COPY_ENGINE_CLASS] = GEN12_BLT_TLB_INV_CR,
- [COMPUTE_CLASS] = GEN12_COMPCTX_TLB_INV_CR,
- };
- static const i915_mcr_reg_t xehp_regs[] = {
- [RENDER_CLASS] = XEHP_GFX_TLB_INV_CR,
- [VIDEO_DECODE_CLASS] = XEHP_VD_TLB_INV_CR,
- [VIDEO_ENHANCEMENT_CLASS] = XEHP_VE_TLB_INV_CR,
- [COPY_ENGINE_CLASS] = XEHP_BLT_TLB_INV_CR,
- [COMPUTE_CLASS] = XEHP_COMPCTX_TLB_INV_CR,
- };
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
intel_engine_mask_t awake, tmp;
enum intel_engine_id id;
- const i915_reg_t *regs;
- unsigned int num = 0;
unsigned long flags;
- /*
- * New platforms should not be added with catch-all-newer (>=)
- * condition so that any later platform added triggers the below warning
- * and in turn mandates a human cross-check of whether the invalidation
- * flows have compatible semantics.
- *
- * For instance with the 11.00 -> 12.00 transition three out of five
- * respective engine registers were moved to masked type. Then after the
- * 12.00 -> 12.50 transition multi cast handling is required too.
- */
-
- if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) ||
- GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) {
- regs = NULL;
- num = ARRAY_SIZE(xehp_regs);
- } else if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 0) ||
- GRAPHICS_VER_FULL(i915) == IP_VER(12, 10)) {
- regs = gen12_regs;
- num = ARRAY_SIZE(gen12_regs);
- } else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
- regs = gen8_regs;
- num = ARRAY_SIZE(gen8_regs);
- } else if (GRAPHICS_VER(i915) < 8) {
- return;
- }
-
- if (gt_WARN_ONCE(gt, !num, "Platform does not implement TLB invalidation!"))
+ if (GRAPHICS_VER(i915) < 8)
return;
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
@@ -1104,33 +1055,18 @@ static void mmio_invalidate_full(struct intel_gt *gt)
awake = 0;
for_each_engine(engine, gt, id) {
- struct reg_and_bit rb;
-
if (!intel_engine_pm_is_awake(engine))
continue;
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
- u32 val = BIT(engine->instance);
-
- if (engine->class == VIDEO_DECODE_CLASS ||
- engine->class == VIDEO_ENHANCEMENT_CLASS ||
- engine->class == COMPUTE_CLASS)
- val = _MASKED_BIT_ENABLE(val);
+ if (engine->tlb_inv.mcr)
intel_gt_mcr_multicast_write_fw(gt,
- xehp_regs[engine->class],
- val);
- } else {
- rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
- if (!i915_mmio_reg_offset(rb.reg))
- continue;
-
- if (GRAPHICS_VER(i915) == 12 && (engine->class == VIDEO_DECODE_CLASS ||
- engine->class == VIDEO_ENHANCEMENT_CLASS ||
- engine->class == COMPUTE_CLASS))
- rb.bit = _MASKED_BIT_ENABLE(rb.bit);
-
- intel_uncore_write_fw(uncore, rb.reg, rb.bit);
- }
+ engine->tlb_inv.reg.mcr_reg,
+ engine->tlb_inv.request);
+ else
+ intel_uncore_write_fw(uncore,
+ engine->tlb_inv.reg.reg,
+ engine->tlb_inv.request);
+
awake |= engine->mask;
}
@@ -1149,17 +1085,9 @@ static void mmio_invalidate_full(struct intel_gt *gt)
intel_gt_mcr_unlock(gt, flags);
for_each_engine_masked(engine, gt, awake, tmp) {
- struct reg_and_bit rb;
-
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
- rb.mcr_reg = xehp_regs[engine->class];
- rb.bit = BIT(engine->instance);
- } else {
- rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
- }
-
- if (wait_for_invalidate(gt, rb))
- gt_err_ratelimited(gt, "%s TLB invalidation did not complete in %ums!\n",
+ if (wait_for_invalidate(engine))
+ gt_err_ratelimited(gt,
+ "%s TLB invalidation did not complete in %ums!\n",
engine->name, TLB_INVAL_TIMEOUT_MS);
}
@@ -1205,3 +1133,7 @@ unlock:
mutex_unlock(&gt->tlb.invalidate_lock);
}
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_tlb.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
index 3bb1c701d5ff..0b414eae1683 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -35,7 +35,7 @@
* ignored.
*/
-#define HAS_MSLICE_STEERING(dev_priv) (INTEL_INFO(dev_priv)->has_mslice_steering)
+#define HAS_MSLICE_STEERING(i915) (INTEL_INFO(i915)->has_mslice_steering)
static const char * const intel_steering_types[] = {
"L3BANK",
@@ -364,6 +364,7 @@ static u32 rw_with_mcr_steering(struct intel_gt *gt,
* function call.
*/
void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags)
+ __acquires(&gt->mcr_lock)
{
unsigned long __flags;
int err = 0;
@@ -410,6 +411,7 @@ void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags)
* Context: Releases gt->mcr_lock
*/
void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags)
+ __releases(&gt->mcr_lock)
{
spin_unlock_irqrestore(&gt->mcr_lock, flags);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index cef3d6f5c34e..e02cb90723ae 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -17,35 +17,13 @@
#include "intel_gt_print.h"
#include "intel_gt_requests.h"
#include "intel_llc.h"
-#include "intel_pm.h"
#include "intel_rc6.h"
#include "intel_rps.h"
#include "intel_wakeref.h"
-#include "intel_pcode.h"
#include "pxp/intel_pxp_pm.h"
#define I915_GT_SUSPEND_IDLE_TIMEOUT (HZ / 2)
-static void mtl_media_busy(struct intel_gt *gt)
-{
- /* Wa_14017073508: mtl */
- if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
- gt->type == GT_MEDIA)
- snb_pcode_write_p(gt->uncore, PCODE_MBOX_GT_STATE,
- PCODE_MBOX_GT_STATE_MEDIA_BUSY,
- PCODE_MBOX_GT_STATE_DOMAIN_MEDIA, 0);
-}
-
-static void mtl_media_idle(struct intel_gt *gt)
-{
- /* Wa_14017073508: mtl */
- if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
- gt->type == GT_MEDIA)
- snb_pcode_write_p(gt->uncore, PCODE_MBOX_GT_STATE,
- PCODE_MBOX_GT_STATE_MEDIA_NOT_BUSY,
- PCODE_MBOX_GT_STATE_DOMAIN_MEDIA, 0);
-}
-
static void user_forcewake(struct intel_gt *gt, bool suspend)
{
int count = atomic_read(&gt->user_wakeref);
@@ -93,9 +71,6 @@ static int __gt_unpark(struct intel_wakeref *wf)
GT_TRACE(gt, "\n");
- /* Wa_14017073508: mtl */
- mtl_media_busy(gt);
-
/*
* It seems that the DMC likes to transition between the DC states a lot
* when there are no connected displays (no active power domains) during
@@ -145,9 +120,6 @@ static int __gt_park(struct intel_wakeref *wf)
GEM_BUG_ON(!wakeref);
intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
- /* Wa_14017073508: mtl */
- mtl_media_idle(gt);
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index 83df4cd5e06c..80dbbef86b1d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -580,7 +580,7 @@ static bool perf_limit_reasons_eval(void *data)
}
DEFINE_SIMPLE_ATTRIBUTE(perf_limit_reasons_fops, perf_limit_reasons_get,
- perf_limit_reasons_clear, "%llu\n");
+ perf_limit_reasons_clear, "0x%llx\n");
void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
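The hunk above only changes the read format of a DEFINE_SIMPLE_ATTRIBUTE() file so the u64 prints as hex, which reads more naturally for a bitmask of limit reasons. A self-contained sketch of that attribute pattern, with the "example" names invented here:

#include <linux/debugfs.h>
#include <linux/fs.h>

static u64 example_reasons;

static int example_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

static int example_clear(void *data, u64 val)
{
	*(u64 *)data = 0;	/* any write clears the accumulated bits */
	return 0;
}

/* "0x%llx\n" formats reads as hex, as in the hunk above. */
DEFINE_SIMPLE_ATTRIBUTE(example_fops, example_get, example_clear, "0x%llx\n");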
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_print.h b/drivers/gpu/drm/i915/gt/intel_gt_print.h
index 5d9da355ce24..55a336a9ff06 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_print.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_print.h
@@ -28,6 +28,9 @@
#define gt_err_ratelimited(_gt, _fmt, ...) \
drm_err_ratelimited(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+#define gt_notice_ratelimited(_gt, _fmt, ...) \
+ dev_notice_ratelimited((_gt)->i915->drm.dev, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
#define gt_probe_error(_gt, _fmt, ...) \
do { \
if (i915_error_injected()) \
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index be0f6e305c88..4aecb5a7b631 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -9,8 +9,6 @@
#include "i915_reg_defs.h"
#include "display/intel_display_reg_defs.h" /* VLV_DISPLAY_BASE */
-#define MCR_REG(offset) ((const i915_mcr_reg_t){ .reg = (offset) })
-
/*
* The perf control registers are technically multicast registers, but the
* driver never needs to read/write them directly; we only use them to build
@@ -480,6 +478,9 @@
#define HDC_FORCE_NON_COHERENT (1 << 4)
#define HDC_BARRIER_PERFORMANCE_DISABLE (1 << 10)
+#define COMMON_SLICE_CHICKEN4 _MMIO(0x7300)
+#define DISABLE_TDC_LOAD_BALANCING_CALC REG_BIT(6)
+
#define GEN8_HDC_CHICKEN1 _MMIO(0x7304)
#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
@@ -769,9 +770,6 @@
#define GEN10_DFR_RATIO_EN_AND_CHICKEN MCR_REG(0x9550)
#define DFR_DISABLE (1 << 9)
-#define INF_UNIT_LEVEL_CLKGATE MCR_REG(0x9560)
-#define CGPSF_CLKGATE_DIS (1 << 3)
-
#define MICRO_BP0_0 _MMIO(0x9800)
#define MICRO_BP0_2 _MMIO(0x9804)
#define MICRO_BP0_1 _MMIO(0x9808)
@@ -1093,6 +1091,7 @@
#define XEHP_BLT_TLB_INV_CR MCR_REG(0xcee4)
#define GEN12_COMPCTX_TLB_INV_CR _MMIO(0xcf04)
#define XEHP_COMPCTX_TLB_INV_CR MCR_REG(0xcf04)
+#define XELPMP_GSC_TLB_INV_CR _MMIO(0xcf04) /* media GT only */
#define XEHP_MERT_MOD_CTRL MCR_REG(0xcf28)
#define RENDER_MOD_CTRL MCR_REG(0xcf2c)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
index 6629e4c72b6b..33cba406b569 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
@@ -72,7 +72,7 @@ static void kobj_gt_release(struct kobject *kobj)
{
}
-static struct kobj_type kobj_gt_type = {
+static const struct kobj_type kobj_gt_type = {
.release = kobj_gt_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = id_groups,
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 5c91622dfca4..f4150f61f39c 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -486,6 +486,7 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
static bool rc6_supported(struct intel_rc6 *rc6)
{
struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_gt *gt = rc6_to_gt(rc6);
if (!HAS_RC6(i915))
return false;
@@ -502,6 +503,13 @@ static bool rc6_supported(struct intel_rc6 *rc6)
return false;
}
+ if (IS_MTL_MEDIA_STEP(gt->i915, STEP_A0, STEP_B0) &&
+ gt->type == GT_MEDIA) {
+ drm_notice(&i915->drm,
+ "Media RC6 disabled on A step\n");
+ return false;
+ }
+
return true;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index f3ad93db0b21..89fdfc67f8d1 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -158,7 +158,7 @@ static const struct intel_memory_region_ops intel_region_lmem_ops = {
static bool get_legacy_lowmem_region(struct intel_uncore *uncore,
u64 *start, u32 *size)
{
- if (!IS_DG1_GRAPHICS_STEP(uncore->i915, STEP_A0, STEP_C0))
+ if (!IS_DG1(uncore->i915))
return false;
*start = 0;
diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h
index 9312b29f5a97..80351f0a856c 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h
@@ -51,7 +51,7 @@ struct intel_reset {
/**
* Waitqueue to signal when the reset has completed. Used by clients
- * that wait for dev_priv->mm.wedged to settle.
+ * that wait for i915->mm.wedged to settle.
*/
wait_queue_head_t queue;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 827adb0cfaea..3fd795c3263f 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -1052,9 +1052,9 @@ static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
static void ring_release(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_private *i915 = engine->i915;
- drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) > 2 &&
+ drm_WARN_ON(&i915->drm, GRAPHICS_VER(i915) > 2 &&
(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
intel_engine_cleanup_common(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index f5d7b5126433..4d0dc9de23f9 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -1677,7 +1677,6 @@ static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
static void vlv_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- u32 val;
vlv_iosf_sb_get(i915,
BIT(VLV_IOSF_SB_PUNIT) |
@@ -1686,21 +1685,6 @@ static void vlv_rps_init(struct intel_rps *rps)
vlv_init_gpll_ref_freq(rps);
- val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
- switch ((val >> 6) & 3) {
- case 0:
- case 1:
- i915->mem_freq = 800;
- break;
- case 2:
- i915->mem_freq = 1066;
- break;
- case 3:
- i915->mem_freq = 1333;
- break;
- }
- drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
-
rps->max_freq = vlv_rps_max_freq(rps);
rps->rp0_freq = rps->max_freq;
drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
@@ -1727,7 +1711,6 @@ static void vlv_rps_init(struct intel_rps *rps)
static void chv_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- u32 val;
vlv_iosf_sb_get(i915,
BIT(VLV_IOSF_SB_PUNIT) |
@@ -1736,18 +1719,6 @@ static void chv_rps_init(struct intel_rps *rps)
vlv_init_gpll_ref_freq(rps);
- val = vlv_cck_read(i915, CCK_FUSE_REG);
-
- switch ((val >> 2) & 0x7) {
- case 3:
- i915->mem_freq = 2000;
- break;
- default:
- i915->mem_freq = 1600;
- break;
- }
- drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
-
rps->max_freq = chv_rps_max_freq(rps);
rps->rp0_freq = rps->max_freq;
drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
index 9173ec75f2b8..6507fa3f6d1e 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -57,7 +57,7 @@ struct intel_rps {
/*
* work, interrupts_enabled and pm_iir are protected by
- * dev_priv->irq_lock
+ * i915->irq_lock
*/
struct timer_list timer;
struct work_struct work;
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
index aa87d3832d60..d7e8c374f153 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -27,7 +27,7 @@ struct drm_printer;
* is only relevant to pre-Xe_HP platforms (Xe_HP and beyond use the
* I915_MAX_SS_FUSE_BITS value below).
*/
-#define GEN_MAX_SS_PER_HSW_SLICE 6
+#define GEN_MAX_SS_PER_HSW_SLICE 8
/*
* Maximum number of 32-bit registers used by hardware to express the
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 485c5cc5d0f9..e7ee24bcad89 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -743,9 +743,13 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
FF_MODE2_GS_TIMER_224,
0, false);
- if (!IS_DG1(i915))
+ if (!IS_DG1(i915)) {
/* Wa_1806527549 */
wa_masked_en(wal, HIZ_CHICKEN, HZ_DEPTH_TEST_LE_GE_OPT_DISABLE);
+
+ /* Wa_1606376872 */
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN4, DISABLE_TDC_LOAD_BALANCING_CALC);
+ }
}
static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -1470,54 +1474,17 @@ gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
-tgl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
-{
- struct drm_i915_private *i915 = gt->i915;
-
- gen12_gt_workarounds_init(gt, wal);
-
- /* Wa_1409420604:tgl */
- if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
- wa_mcr_write_or(wal,
- SUBSLICE_UNIT_LEVEL_CLKGATE2,
- CPSSUNIT_CLKGATE_DIS);
-
- /* Wa_1607087056:tgl also know as BUG:1409180338 */
- if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
- wa_write_or(wal,
- GEN11_SLICE_UNIT_LEVEL_CLKGATE,
- L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
-
- /* Wa_1408615072:tgl[a0] */
- if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
- VSUNIT_CLKGATE_DIS_TGL);
-}
-
-static void
dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- struct drm_i915_private *i915 = gt->i915;
-
gen12_gt_workarounds_init(gt, wal);
- /* Wa_1607087056:dg1 */
- if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
- wa_write_or(wal,
- GEN11_SLICE_UNIT_LEVEL_CLKGATE,
- L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
-
/* Wa_1409420604:dg1 */
- if (IS_DG1(i915))
- wa_mcr_write_or(wal,
- SUBSLICE_UNIT_LEVEL_CLKGATE2,
- CPSSUNIT_CLKGATE_DIS);
+ wa_mcr_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2,
+ CPSSUNIT_CLKGATE_DIS);
/* Wa_1408615072:dg1 */
/* Empirical testing shows this register is unaffected by engine reset. */
- if (IS_DG1(i915))
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
- VSUNIT_CLKGATE_DIS_TGL);
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL);
}
static void
@@ -1530,6 +1497,12 @@ xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
/* Wa_1409757795:xehpsdv */
wa_mcr_write_or(wal, SCCGCTL94DC, CG3DDISURB);
+ /* Wa_18011725039:xehpsdv */
+ if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) {
+ wa_mcr_masked_dis(wal, MLTICTXCTL, TDONRENDER);
+ wa_mcr_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
+ }
+
/* Wa_16011155590:xehpsdv */
if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
@@ -1579,6 +1552,9 @@ xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
/* Wa_14014368820:xehpsdv */
wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
+
+ /* Wa_14010670810:xehpsdv */
+ wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
}
static void
@@ -1681,13 +1657,6 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
/* Wa_14014830051:dg2 */
wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
- /*
- * The following are not actually "workarounds" but rather
- * recommended tuning settings documented in the bspec's
- * performance guide section.
- */
- wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
-
/* Wa_14015795083 */
wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
@@ -1700,6 +1669,9 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
/* Wa_1509235366:dg2 */
wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
+
+ /* Wa_14010648519:dg2 */
+ wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
}
static void
@@ -1715,6 +1687,9 @@ pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
+
+ /* Wa_16016694945 */
+ wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
}
static void
@@ -1755,11 +1730,38 @@ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
debug_dump_steering(gt);
}
+/*
+ * The bspec performance guide has recommended MMIO tuning settings. These
+ * aren't truly "workarounds" but we want to program them through the
+ * workaround infrastructure to make sure they're (re)applied at the proper
+ * times.
+ *
+ * The programming in this function is for settings that persist through
+ * engine resets and also are not part of any engine's register state context.
+ * I.e., settings that only need to be re-applied in the event of a full GT
+ * reset.
+ */
+static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ if (IS_PONTEVECCHIO(gt->i915)) {
+ wa_mcr_write(wal, XEHPC_L3SCRUB,
+ SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
+ wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_HOSTCACHEEN);
+ }
+
+ if (IS_DG2(gt->i915)) {
+ wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
+ wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
+ }
+}
+
static void
gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = gt->i915;
+ gt_tuning_settings(gt, wal);
+
if (gt->type == GT_MEDIA) {
if (MEDIA_VER(i915) >= 13)
xelpmp_gt_workarounds_init(gt, wal);
@@ -1779,8 +1781,6 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
xehpsdv_gt_workarounds_init(gt, wal);
else if (IS_DG1(i915))
dg1_gt_workarounds_init(gt, wal);
- else if (IS_TIGERLAKE(i915))
- tgl_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 12)
gen12_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 11)
@@ -2187,37 +2187,20 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
/* Wa_1806527549:tgl */
whitelist_reg(w, HIZ_CHICKEN);
+
+ /* Required by recommended tuning setting (not a workaround) */
+ whitelist_reg(w, GEN11_COMMON_SLICE_CHICKEN3);
+
break;
default:
break;
}
}
-static void dg1_whitelist_build(struct intel_engine_cs *engine)
-{
- struct i915_wa_list *w = &engine->whitelist;
-
- tgl_whitelist_build(engine);
-
- /* GEN:BUG:1409280441:dg1 */
- if (IS_DG1_GRAPHICS_STEP(engine->i915, STEP_A0, STEP_B0) &&
- (engine->class == RENDER_CLASS ||
- engine->class == COPY_ENGINE_CLASS))
- whitelist_reg_ext(w, RING_ID(engine->mmio_base),
- RING_FORCE_TO_NONPRIV_ACCESS_RD);
-}
-
-static void xehpsdv_whitelist_build(struct intel_engine_cs *engine)
-{
- allow_read_ctx_timestamp(engine);
-}
-
static void dg2_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
- allow_read_ctx_timestamp(engine);
-
switch (engine->class) {
case RENDER_CLASS:
/*
@@ -2234,6 +2217,9 @@ static void dg2_whitelist_build(struct intel_engine_cs *engine)
RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4);
+ /* Required by recommended tuning setting (not a workaround) */
+ whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
+
break;
case COMPUTE_CLASS:
/* Wa_16011157294:dg2_g10 */
@@ -2265,12 +2251,25 @@ static void blacklist_trtt(struct intel_engine_cs *engine)
static void pvc_whitelist_build(struct intel_engine_cs *engine)
{
- allow_read_ctx_timestamp(engine);
-
/* Wa_16014440446:pvc */
blacklist_trtt(engine);
}
+static void mtl_whitelist_build(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ switch (engine->class) {
+ case RENDER_CLASS:
+ /* Required by recommended tuning setting (not a workaround) */
+ whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
+
+ break;
+ default:
+ break;
+ }
+}
+
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
@@ -2279,15 +2278,13 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
wa_init_start(w, engine->gt, "whitelist", engine->name);
if (IS_METEORLAKE(i915))
- ; /* noop; none at this time */
+ mtl_whitelist_build(engine);
else if (IS_PONTEVECCHIO(i915))
pvc_whitelist_build(engine);
else if (IS_DG2(i915))
dg2_whitelist_build(engine);
else if (IS_XEHPSDV(i915))
- xehpsdv_whitelist_build(engine);
- else if (IS_DG1(i915))
- dg1_whitelist_build(engine);
+ ; /* none needed */
else if (GRAPHICS_VER(i915) == 12)
tgl_whitelist_build(engine);
else if (GRAPHICS_VER(i915) == 11)
@@ -2452,16 +2449,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
MDQ_ARBITRATION_MODE | UGM_BACKUP_MODE);
}
- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0))
/* Wa_22010430635:dg2 */
wa_mcr_masked_en(wal,
GEN9_ROW_CHICKEN4,
GEN12_DISABLE_GRF_CLEAR);
- /* Wa_14010648519:dg2 */
- wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
- }
-
/* Wa_14013202645:dg2 */
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0))
@@ -2482,27 +2475,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
true);
}
- if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
- IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) {
- /*
- * Wa_1607138336:tgl[a0],dg1[a0]
- * Wa_1607063988:tgl[a0],dg1[a0]
- */
- wa_write_or(wal,
- GEN9_CTX_PREEMPT_REG,
- GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
- }
-
- if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) {
- /*
- * Wa_1606679103:tgl
- * (see also Wa_1606682166:icl)
- */
- wa_write_or(wal,
- GEN7_SARCHKMD,
- GEN7_DISABLE_SAMPLER_PREFETCH);
- }
-
if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
/* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */
@@ -2532,30 +2504,22 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
}
if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
- IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
- /* Wa_1409804808:tgl,rkl,dg1[a0],adl-s,adl-p */
+ /* Wa_1409804808 */
wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
GEN12_PUSH_CONST_DEREF_HOLD_DIS);
- /*
- * Wa_1409085225:tgl
- * Wa_14010229206:tgl,rkl,dg1[a0],adl-s,adl-p
- */
+ /* Wa_14010229206 */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
}
- if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
- IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
+ if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
/*
- * Wa_1607030317:tgl
- * Wa_1607186500:tgl
- * Wa_1607297627:tgl,rkl,dg1[a0],adlp
+ * Wa_1607297627
*
* On TGL and RKL there are multiple entries for this WA in the
* BSpec; some indicate this is an A0-only WA, others indicate
* it applies to all steppings so we trust the "all steppings."
- * For DG1 this only applies to A0.
*/
wa_masked_en(wal,
RING_PSMI_CTL(RENDER_RING_BASE),
@@ -2975,16 +2939,8 @@ static void
add_render_compute_tuning_settings(struct drm_i915_private *i915,
struct i915_wa_list *wal)
{
- if (IS_PONTEVECCHIO(i915)) {
- wa_mcr_write(wal, XEHPC_L3SCRUB,
- SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
- wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_HOSTCACHEEN);
- }
-
- if (IS_DG2(i915)) {
- wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
+ if (IS_DG2(i915))
wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
- }
/*
* This tuning setting proves beneficial only on ATS-M designs; the
@@ -3066,11 +3022,6 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
0, false);
}
- if (IS_PONTEVECCHIO(i915)) {
- /* Wa_16016694945 */
- wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
- }
-
if (IS_XEHPSDV(i915)) {
/* Wa_1409954639 */
wa_mcr_masked_en(wal,
@@ -3082,18 +3033,9 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
GEN9_ROW_CHICKEN4,
GEN12_DISABLE_GRF_CLEAR);
- /* Wa_14010670810:xehpsdv */
- wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
-
/* Wa_14010449647:xehpsdv */
wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
-
- /* Wa_18011725039:xehpsdv */
- if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) {
- wa_mcr_masked_dis(wal, MLTICTXCTL, TDONRENDER);
- wa_mcr_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
- }
}
if (IS_DG2(i915) || IS_PONTEVECCHIO(i915)) {
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index b46425aeb2f0..0971241707ce 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -63,8 +63,8 @@ static void measure_clocks(struct intel_engine_cs *engine,
udelay(1000);
- dt[i] = ktime_sub(ktime_get(), dt[i]);
cycles[i] += read_timestamp(engine);
+ dt[i] = ktime_sub(ktime_get(), dt[i]);
local_irq_enable();
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c
index cfd736d88939..779fadcec7c4 100644
--- a/drivers/gpu/drm/i915/gt/selftest_llc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.c
@@ -3,7 +3,6 @@
* Copyright © 2019 Intel Corporation
*/
-#include "intel_pm.h" /* intel_gpu_freq() */
#include "selftest_llc.h"
#include "intel_rps.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index 6755bbc4ebda..84e77e8dbba1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -299,13 +299,13 @@ int live_rps_clock_interval(void *arg)
for (i = 0; i < 5; i++) {
preempt_disable();
- dt_[i] = ktime_get();
cycles_[i] = -intel_uncore_read_fw(gt->uncore, GEN6_RP_CUR_UP_EI);
+ dt_[i] = ktime_get();
udelay(1000);
- dt_[i] = ktime_sub(ktime_get(), dt_[i]);
cycles_[i] += intel_uncore_read_fw(gt->uncore, GEN6_RP_CUR_UP_EI);
+ dt_[i] = ktime_sub(ktime_get(), dt_[i]);
preempt_enable();
}
@@ -537,8 +537,8 @@ static u64 __measure_frequency(u32 *cntr, int duration_ms)
{
u64 dc, dt;
- dt = ktime_get();
dc = READ_ONCE(*cntr);
+ dt = ktime_get();
usleep_range(1000 * duration_ms, 2000 * duration_ms);
dc = READ_ONCE(*cntr) - dc;
dt = ktime_get() - dt;
@@ -566,8 +566,8 @@ static u64 __measure_cs_frequency(struct intel_engine_cs *engine,
{
u64 dc, dt;
- dt = ktime_get();
dc = intel_uncore_read_fw(engine->uncore, CS_GPR(0));
+ dt = ktime_get();
usleep_range(1000 * duration_ms, 2000 * duration_ms);
dc = intel_uncore_read_fw(engine->uncore, CS_GPR(0)) - dc;
dt = ktime_get() - dt;
@@ -1094,8 +1094,8 @@ static u64 __measure_power(int duration_ms)
{
u64 dE, dt;
- dt = ktime_get();
dE = librapl_energy_uJ();
+ dt = ktime_get();
usleep_range(1000 * duration_ms, 2000 * duration_ms);
dE = librapl_energy_uJ() - dE;
dt = ktime_get() - dt;
diff --git a/drivers/gpu/drm/i915/gt/selftest_tlb.c b/drivers/gpu/drm/i915/gt/selftest_tlb.c
new file mode 100644
index 000000000000..e6cac1f15d6e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_tlb.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_region.h"
+
+#include "gen8_engine_cs.h"
+#include "i915_gem_ww.h"
+#include "intel_engine_regs.h"
+#include "intel_gpu_commands.h"
+#include "intel_context.h"
+#include "intel_gt.h"
+#include "intel_ring.h"
+
+#include "selftests/igt_flush_test.h"
+#include "selftests/i915_random.h"
+
+static void vma_set_qw(struct i915_vma *vma, u64 addr, u64 val)
+{
+ GEM_BUG_ON(addr < i915_vma_offset(vma));
+ GEM_BUG_ON(addr >= i915_vma_offset(vma) + i915_vma_size(vma) + sizeof(val));
+ memset64(page_mask_bits(vma->obj->mm.mapping) +
+ (addr - i915_vma_offset(vma)), val, 1);
+}
+
+static int
+pte_tlbinv(struct intel_context *ce,
+ struct i915_vma *va,
+ struct i915_vma *vb,
+ u64 align,
+ void (*tlbinv)(struct i915_address_space *vm, u64 addr, u64 length),
+ u64 length,
+ struct rnd_state *prng)
+{
+ struct drm_i915_gem_object *batch;
+ struct drm_mm_node vb_node;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u64 addr;
+ int err;
+ u32 *cs;
+
+ batch = i915_gem_object_create_internal(ce->vm->i915, 4096);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ vma = i915_vma_instance(batch, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out;
+
+ /* Pin va at random but aligned offset after vma */
+ addr = round_up(vma->node.start + vma->node.size, align);
+ /* MI_CONDITIONAL_BATCH_BUFFER_END limits address to 48b */
+ addr = igt_random_offset(prng, addr, min(ce->vm->total, BIT_ULL(48)),
+ va->size, align);
+ err = i915_vma_pin(va, 0, 0, addr | PIN_OFFSET_FIXED | PIN_USER);
+ if (err) {
+ pr_err("Cannot pin at %llx+%llx\n", addr, va->size);
+ goto out;
+ }
+ GEM_BUG_ON(i915_vma_offset(va) != addr);
+ if (vb != va) {
+ vb_node = vb->node;
+ vb->node = va->node; /* overwrites the _same_ PTE */
+ }
+
+ /*
+ * Now choose a random dword within the 1st pinned page.
+ *
+ * SZ_64K pages on dg1 require that the whole PT be marked as
+ * containing 64KiB entries. So we make sure that the vma
+ * covers the whole PT, despite being randomly aligned to 64KiB,
+ * and restrict our sampling to the 2MiB PT within which
+ * we know that we will be using 64KiB pages.
+ */
+ if (align == SZ_64K)
+ addr = round_up(addr, SZ_2M);
+ addr = igt_random_offset(prng, addr, addr + align, 8, 8);
+
+ if (va != vb)
+ pr_info("%s(%s): Sampling %llx, with alignment %llx, using PTE size %x (phys %x, sg %x), invalidate:%llx+%llx\n",
+ ce->engine->name, va->obj->mm.region->name ?: "smem",
+ addr, align, va->resource->page_sizes_gtt,
+ va->page_sizes.phys, va->page_sizes.sg,
+ addr & -length, length);
+
+ cs = i915_gem_object_pin_map_unlocked(batch, I915_MAP_WC);
+ *cs++ = MI_NOOP; /* for later termination */
+ /*
+ * Sample the target to see if we spot the updated backing store.
+ * Gen8 VCS compares the immediate value with the bitwise-and of two
+ * consecutive DWORDs pointed to by addr; other gens/engines compare the
+ * value with the DWORD pointed to by addr. Moreover we want to exercise
+ * DWORD-sized invalidations. The values below have been chosen to
+ * fulfil all of these requirements.
+ */
+ *cs++ = MI_CONDITIONAL_BATCH_BUFFER_END | MI_DO_COMPARE | 2;
+ *cs++ = 0; /* break if *addr == 0 */
+ *cs++ = lower_32_bits(addr);
+ *cs++ = upper_32_bits(addr);
+ vma_set_qw(va, addr, -1);
+ vma_set_qw(vb, addr, 0);
+
+ /* Keep sampling until we get bored */
+ *cs++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
+ *cs++ = lower_32_bits(i915_vma_offset(vma));
+ *cs++ = upper_32_bits(i915_vma_offset(vma));
+
+ i915_gem_object_flush_map(batch);
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_va;
+ }
+
+ err = rq->engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0);
+ if (err) {
+ i915_request_add(rq);
+ goto out_va;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ /* Short sleep to sanitycheck the batch is spinning before we begin */
+ msleep(10);
+ if (va == vb) {
+ if (!i915_request_completed(rq)) {
+ pr_err("%s(%s): Semaphore sanitycheck failed %llx, with alignment %llx, using PTE size %x (phys %x, sg %x)\n",
+ ce->engine->name, va->obj->mm.region->name ?: "smem",
+ addr, align, va->resource->page_sizes_gtt,
+ va->page_sizes.phys, va->page_sizes.sg);
+ err = -EIO;
+ }
+ } else if (!i915_request_completed(rq)) {
+ struct i915_vma_resource vb_res = {
+ .bi.pages = vb->obj->mm.pages,
+ .bi.page_sizes = vb->obj->mm.page_sizes,
+ .start = i915_vma_offset(vb),
+ .vma_size = i915_vma_size(vb)
+ };
+ unsigned int pte_flags = 0;
+
+ /* Flip the PTE between A and B */
+ if (i915_gem_object_is_lmem(vb->obj))
+ pte_flags |= PTE_LM;
+ ce->vm->insert_entries(ce->vm, &vb_res, 0, pte_flags);
+
+ /* Flush the PTE update to concurrent HW */
+ tlbinv(ce->vm, addr & -length, length);
+
+ if (wait_for(i915_request_completed(rq), HZ / 2)) {
+ pr_err("%s: Request did not complete; the COND_BBE did not read the updated PTE\n",
+ ce->engine->name);
+ err = -EINVAL;
+ }
+ } else {
+ pr_err("Spinner ended unexpectedly\n");
+ err = -EIO;
+ }
+ i915_request_put(rq);
+
+ cs = page_mask_bits(batch->mm.mapping);
+ *cs = MI_BATCH_BUFFER_END;
+ wmb();
+
+out_va:
+ if (vb != va)
+ vb->node = vb_node;
+ i915_vma_unpin(va);
+ if (i915_vma_unbind_unlocked(va))
+ err = -EIO;
+out:
+ i915_gem_object_put(batch);
+ return err;
+}
+
+static struct drm_i915_gem_object *create_lmem(struct intel_gt *gt)
+{
+ /*
+ * Allocating the largest possible page size allows us to test all
+ * types of pages.
+ */
+ return i915_gem_object_create_lmem(gt->i915, SZ_1G, I915_BO_ALLOC_CONTIGUOUS);
+}
+
+static struct drm_i915_gem_object *create_smem(struct intel_gt *gt)
+{
+ /*
+ * SZ_64K pages require covering the whole 2M PT (gen8 to tgl/dg1).
+ * While that does not require the whole 2M block to be contiguous,
+ * it is easier to make it so, since we need that for SZ_2M pages.
+ * Since we randomly offset the start of the vma, we need a 4M object
+ * so that there is a 2M range within it that is suitable for SZ_64K PTEs.
+ */
+ return i915_gem_object_create_internal(gt->i915, SZ_4M);
+}
+
+static int
+mem_tlbinv(struct intel_gt *gt,
+ struct drm_i915_gem_object *(*create_fn)(struct intel_gt *),
+ void (*tlbinv)(struct i915_address_space *vm, u64 addr, u64 length))
+{
+ unsigned int ppgtt_size = RUNTIME_INFO(gt->i915)->ppgtt_size;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *A, *B;
+ struct i915_ppgtt *ppgtt;
+ struct i915_vma *va, *vb;
+ enum intel_engine_id id;
+ I915_RND_STATE(prng);
+ void *vaddr;
+ int err;
+
+ /*
+ * Check that the TLB invalidate is able to revoke an active
+ * page. We load a page into a spinning COND_BBE loop and then
+ * remap that page to a new physical address. The old address is
+ * retained in the TLB cache until we issue an invalidate, and so
+ * the loop keeps spinning.
+ */
+
+ A = create_fn(gt);
+ if (IS_ERR(A))
+ return PTR_ERR(A);
+
+ vaddr = i915_gem_object_pin_map_unlocked(A, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out_a;
+ }
+
+ B = create_fn(gt);
+ if (IS_ERR(B)) {
+ err = PTR_ERR(B);
+ goto out_a;
+ }
+
+ vaddr = i915_gem_object_pin_map_unlocked(B, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out_b;
+ }
+
+ GEM_BUG_ON(A->base.size != B->base.size);
+ if ((A->mm.page_sizes.phys | B->mm.page_sizes.phys) & (A->base.size - 1))
+ pr_warn("Failed to allocate contiguous pages for size %zx\n",
+ A->base.size);
+
+ ppgtt = i915_ppgtt_create(gt, 0);
+ if (IS_ERR(ppgtt)) {
+ err = PTR_ERR(ppgtt);
+ goto out_b;
+ }
+
+ va = i915_vma_instance(A, &ppgtt->vm, NULL);
+ if (IS_ERR(va)) {
+ err = PTR_ERR(va);
+ goto out_vm;
+ }
+
+ vb = i915_vma_instance(B, &ppgtt->vm, NULL);
+ if (IS_ERR(vb)) {
+ err = PTR_ERR(vb);
+ goto out_vm;
+ }
+
+ err = 0;
+ for_each_engine(engine, gt, id) {
+ struct i915_gem_ww_ctx ww;
+ struct intel_context *ce;
+ int bit;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ i915_vm_put(ce->vm);
+ ce->vm = i915_vm_get(&ppgtt->vm);
+
+ for_i915_gem_ww(&ww, err, true)
+ err = intel_context_pin_ww(ce, &ww);
+ if (err)
+ goto err_put;
+
+ for_each_set_bit(bit,
+ (unsigned long *)&RUNTIME_INFO(gt->i915)->page_sizes,
+ BITS_PER_TYPE(RUNTIME_INFO(gt->i915)->page_sizes)) {
+ unsigned int len;
+
+ if (BIT_ULL(bit) < i915_vm_obj_min_alignment(va->vm, va->obj))
+ continue;
+
+ /* sanitycheck the semaphore wake up */
+ err = pte_tlbinv(ce, va, va,
+ BIT_ULL(bit),
+ NULL, SZ_4K,
+ &prng);
+ if (err)
+ goto err_unpin;
+
+ for (len = 2; len <= ppgtt_size; len = min(2 * len, ppgtt_size)) {
+ err = pte_tlbinv(ce, va, vb,
+ BIT_ULL(bit),
+ tlbinv,
+ BIT_ULL(len),
+ &prng);
+ if (err)
+ goto err_unpin;
+ if (len == ppgtt_size)
+ break;
+ }
+ }
+err_unpin:
+ intel_context_unpin(ce);
+err_put:
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+out_vm:
+ i915_vm_put(&ppgtt->vm);
+out_b:
+ i915_gem_object_put(B);
+out_a:
+ i915_gem_object_put(A);
+ return err;
+}
+
+static void tlbinv_full(struct i915_address_space *vm, u64 addr, u64 length)
+{
+ intel_gt_invalidate_tlb(vm->gt, intel_gt_tlb_seqno(vm->gt) | 1);
+}
+
+static int invalidate_full(void *arg)
+{
+ struct intel_gt *gt = arg;
+ int err;
+
+ if (GRAPHICS_VER(gt->i915) < 8)
+ return 0; /* TLB invalidate not implemented */
+
+ err = mem_tlbinv(gt, create_smem, tlbinv_full);
+ if (err == 0)
+ err = mem_tlbinv(gt, create_lmem, tlbinv_full);
+ if (err == -ENODEV || err == -ENXIO)
+ err = 0;
+
+ return err;
+}
+
+int intel_tlb_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(invalidate_full),
+ };
+ struct intel_gt *gt;
+ unsigned int i;
+
+ for_each_gt(gt, i915, i) {
+ int err;
+
+ if (intel_gt_is_wedged(gt))
+ continue;
+
+ err = intel_gt_live_subtests(tests, gt);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
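For reference, a live selftest like the one above only runs once it is listed in the selftest() table; the entry below is a minimal sketch of the expected hookup in i915_live_selftests.h (that hunk is not shown in this excerpt, so treat the exact placement as an assumption).

/* Hypothetical registration entry in i915_live_selftests.h */
selftest(tlb, intel_tlb_live_selftests)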
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
index f2d9858d827c..021f51d9b456 100644
--- a/drivers/gpu/drm/i915/gt/sysfs_engines.c
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
@@ -24,37 +24,37 @@ static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
static ssize_t
name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
+ return sysfs_emit(buf, "%s\n", kobj_to_engine(kobj)->name);
}
-static struct kobj_attribute name_attr =
+static const struct kobj_attribute name_attr =
__ATTR(name, 0444, name_show, NULL);
static ssize_t
class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
+ return sysfs_emit(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
}
-static struct kobj_attribute class_attr =
+static const struct kobj_attribute class_attr =
__ATTR(class, 0444, class_show, NULL);
static ssize_t
inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
+ return sysfs_emit(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
}
-static struct kobj_attribute inst_attr =
+static const struct kobj_attribute inst_attr =
__ATTR(instance, 0444, inst_show, NULL);
static ssize_t
mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
+ return sysfs_emit(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
}
-static struct kobj_attribute mmio_attr =
+static const struct kobj_attribute mmio_attr =
__ATTR(mmio_base, 0444, mmio_show, NULL);
static const char * const vcs_caps[] = {
@@ -107,11 +107,9 @@ __caps_show(struct intel_engine_cs *engine,
for_each_set_bit(n, &caps, show_unknown ? BITS_PER_LONG : count) {
if (n >= count || !repr[n]) {
if (GEM_WARN_ON(show_unknown))
- len += snprintf(buf + len, PAGE_SIZE - len,
- "[%x] ", n);
+ len += sysfs_emit_at(buf, len, "[%x] ", n);
} else {
- len += snprintf(buf + len, PAGE_SIZE - len,
- "%s ", repr[n]);
+ len += sysfs_emit_at(buf, len, "%s ", repr[n]);
}
if (GEM_WARN_ON(len >= PAGE_SIZE))
break;
@@ -127,7 +125,7 @@ caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
return __caps_show(engine, engine->uabi_capabilities, buf, true);
}
-static struct kobj_attribute caps_attr =
+static const struct kobj_attribute caps_attr =
__ATTR(capabilities, 0444, caps_show, NULL);
static ssize_t
@@ -136,7 +134,7 @@ all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
return __caps_show(kobj_to_engine(kobj), -1, buf, false);
}
-static struct kobj_attribute all_caps_attr =
+static const struct kobj_attribute all_caps_attr =
__ATTR(known_capabilities, 0444, all_caps_show, NULL);
static ssize_t
@@ -182,10 +180,10 @@ max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->props.max_busywait_duration_ns);
+ return sysfs_emit(buf, "%lu\n", engine->props.max_busywait_duration_ns);
}
-static struct kobj_attribute max_spin_attr =
+static const struct kobj_attribute max_spin_attr =
__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
static ssize_t
@@ -193,10 +191,10 @@ max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
+ return sysfs_emit(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
}
-static struct kobj_attribute max_spin_def =
+static const struct kobj_attribute max_spin_def =
__ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);
static ssize_t
@@ -236,10 +234,10 @@ timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->props.timeslice_duration_ms);
+ return sysfs_emit(buf, "%lu\n", engine->props.timeslice_duration_ms);
}
-static struct kobj_attribute timeslice_duration_attr =
+static const struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
static ssize_t
@@ -247,10 +245,10 @@ timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
+ return sysfs_emit(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
}
-static struct kobj_attribute timeslice_duration_def =
+static const struct kobj_attribute timeslice_duration_def =
__ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);
static ssize_t
@@ -287,10 +285,10 @@ stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->props.stop_timeout_ms);
+ return sysfs_emit(buf, "%lu\n", engine->props.stop_timeout_ms);
}
-static struct kobj_attribute stop_timeout_attr =
+static const struct kobj_attribute stop_timeout_attr =
__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
static ssize_t
@@ -298,10 +296,10 @@ stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->defaults.stop_timeout_ms);
+ return sysfs_emit(buf, "%lu\n", engine->defaults.stop_timeout_ms);
}
-static struct kobj_attribute stop_timeout_def =
+static const struct kobj_attribute stop_timeout_def =
__ATTR(stop_timeout_ms, 0444, stop_default, NULL);
static ssize_t
@@ -343,10 +341,10 @@ preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->props.preempt_timeout_ms);
+ return sysfs_emit(buf, "%lu\n", engine->props.preempt_timeout_ms);
}
-static struct kobj_attribute preempt_timeout_attr =
+static const struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
static ssize_t
@@ -355,10 +353,10 @@ preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
+ return sysfs_emit(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
}
-static struct kobj_attribute preempt_timeout_def =
+static const struct kobj_attribute preempt_timeout_def =
__ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);
static ssize_t
@@ -399,10 +397,10 @@ heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->props.heartbeat_interval_ms);
+ return sysfs_emit(buf, "%lu\n", engine->props.heartbeat_interval_ms);
}
-static struct kobj_attribute heartbeat_interval_attr =
+static const struct kobj_attribute heartbeat_interval_attr =
__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
static ssize_t
@@ -410,10 +408,10 @@ heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
+ return sysfs_emit(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
}
-static struct kobj_attribute heartbeat_interval_def =
+static const struct kobj_attribute heartbeat_interval_def =
__ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);
static void kobj_engine_release(struct kobject *kobj)
@@ -421,7 +419,7 @@ static void kobj_engine_release(struct kobject *kobj)
kfree(kobj);
}
-static struct kobj_type kobj_engine_type = {
+static const struct kobj_type kobj_engine_type = {
.release = kobj_engine_release,
.sysfs_ops = &kobj_sysfs_ops
};
@@ -449,7 +447,7 @@ kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
static void add_defaults(struct kobj_engine *parent)
{
- static const struct attribute *files[] = {
+ static const struct attribute * const files[] = {
&max_spin_def.attr,
&stop_timeout_def.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
@@ -485,7 +483,7 @@ static void add_defaults(struct kobj_engine *parent)
void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
- static const struct attribute *files[] = {
+ static const struct attribute * const files[] = {
&name_attr.attr,
&class_attr.attr,
&inst_attr.attr,
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
index e73d4440c5e8..1d9fdfb11268 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
@@ -6,6 +6,7 @@
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_print.h"
#include "gt/intel_ring.h"
#include "intel_gsc_fw.h"
@@ -88,9 +89,8 @@ out_rq:
i915_request_put(rq);
if (err)
- drm_err(&gsc_uc_to_gt(gsc)->i915->drm,
- "Request submission for GSC load failed (%d)\n",
- err);
+ gt_err(gsc_uc_to_gt(gsc), "Request submission for GSC load failed %pe\n",
+ ERR_PTR(err));
return err;
}
@@ -200,8 +200,7 @@ int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc)
/* FW is not fully operational until we enable SW proxy */
intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
- drm_info(&gt->i915->drm, "Loaded GSC firmware %s\n",
- gsc_fw->file_selected.path);
+ gt_info(gt, "Loaded GSC firmware %s\n", gsc_fw->file_selected.path);
return 0;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
index fd21dbd2663b..2d5b70b3384c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include "gt/intel_gt.h"
+#include "gt/intel_gt_print.h"
#include "intel_gsc_uc.h"
#include "intel_gsc_fw.h"
#include "i915_drv.h"
@@ -59,7 +60,6 @@ int intel_gsc_uc_init(struct intel_gsc_uc *gsc)
{
static struct lock_class_key gsc_lock;
struct intel_gt *gt = gsc_uc_to_gt(gsc);
- struct drm_i915_private *i915 = gt->i915;
struct intel_engine_cs *engine = gt->engine[GSC0];
struct intel_context *ce;
struct i915_vma *vma;
@@ -81,8 +81,7 @@ int intel_gsc_uc_init(struct intel_gsc_uc *gsc)
I915_GEM_HWS_GSC_ADDR,
&gsc_lock, "gsc_context");
if (IS_ERR(ce)) {
- drm_err(&gt->i915->drm,
- "failed to create GSC CS ctx for FW communication\n");
+ gt_err(gt, "failed to create GSC CS ctx for FW communication\n");
err = PTR_ERR(ce);
goto out_vma;
}
@@ -98,7 +97,7 @@ out_vma:
out_fw:
intel_uc_fw_fini(&gsc->fw);
out:
- i915_probe_error(i915, "failed with %d\n", err);
+ gt_probe_error(gt, "GSC init failed %pe\n", ERR_PTR(err));
return err;
}
@@ -117,7 +116,7 @@ void intel_gsc_uc_fini(struct intel_gsc_uc *gsc)
intel_uc_fw_fini(&gsc->fw);
}
-void intel_gsc_uc_suspend(struct intel_gsc_uc *gsc)
+void intel_gsc_uc_flush_work(struct intel_gsc_uc *gsc)
{
if (!intel_uc_fw_is_loadable(&gsc->fw))
return;
@@ -125,6 +124,25 @@ void intel_gsc_uc_suspend(struct intel_gsc_uc *gsc)
flush_work(&gsc->work);
}
+void intel_gsc_uc_resume(struct intel_gsc_uc *gsc)
+{
+ if (!intel_uc_fw_is_loadable(&gsc->fw))
+ return;
+
+ /*
+ * We only want to start the GSC worker from here in the actual resume
+ * flow and not during driver load. This is because GSC load is slow and
+ * therefore we want to make sure that the default state init completes
+ * first to not slow down the init thread. A separate call to
+ * intel_gsc_uc_load_start will ensure that the GSC is loaded during
+ * driver load.
+ */
+ if (!gsc_uc_to_gt(gsc)->engine[GSC0]->default_state)
+ return;
+
+ intel_gsc_uc_load_start(gsc);
+}
+
void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc)
{
if (!intel_uc_fw_is_loadable(&gsc->fw))
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h
index 03fd0a8e8db1..5f50fa1ff8b9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h
@@ -26,6 +26,8 @@ void intel_gsc_uc_init_early(struct intel_gsc_uc *gsc);
int intel_gsc_uc_init(struct intel_gsc_uc *gsc);
void intel_gsc_uc_fini(struct intel_gsc_uc *gsc);
void intel_gsc_uc_suspend(struct intel_gsc_uc *gsc);
+void intel_gsc_uc_resume(struct intel_gsc_uc *gsc);
+void intel_gsc_uc_flush_work(struct intel_gsc_uc *gsc);
void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc);
static inline bool intel_gsc_uc_is_supported(struct intel_gsc_uc *gsc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
new file mode 100644
index 000000000000..ea0da06e2f39
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_ring.h"
+#include "intel_gsc_uc_heci_cmd_submit.h"
+
+struct gsc_heci_pkt {
+ u64 addr_in;
+ u32 size_in;
+ u64 addr_out;
+ u32 size_out;
+};
+
+static int emit_gsc_heci_pkt(struct i915_request *rq, struct gsc_heci_pkt *pkt)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GSC_HECI_CMD_PKT;
+ *cs++ = lower_32_bits(pkt->addr_in);
+ *cs++ = upper_32_bits(pkt->addr_in);
+ *cs++ = pkt->size_in;
+ *cs++ = lower_32_bits(pkt->addr_out);
+ *cs++ = upper_32_bits(pkt->addr_out);
+ *cs++ = pkt->size_out;
+ *cs++ = 0;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int intel_gsc_uc_heci_cmd_submit_packet(struct intel_gsc_uc *gsc, u64 addr_in,
+ u32 size_in, u64 addr_out,
+ u32 size_out)
+{
+ struct intel_context *ce = gsc->ce;
+ struct i915_request *rq;
+ struct gsc_heci_pkt pkt = {
+ .addr_in = addr_in,
+ .size_in = size_in,
+ .addr_out = addr_out,
+ .size_out = size_out
+ };
+ int err;
+
+ if (!ce)
+ return -ENODEV;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ if (ce->engine->emit_init_breadcrumb) {
+ err = ce->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto out_rq;
+ }
+
+ err = emit_gsc_heci_pkt(rq, &pkt);
+
+ if (err)
+ goto out_rq;
+
+ err = ce->engine->emit_flush(rq, 0);
+
+out_rq:
+ i915_request_get(rq);
+
+ if (unlikely(err))
+ i915_request_set_error_once(rq, err);
+
+ i915_request_add(rq);
+
+ if (!err && i915_request_wait(rq, 0, msecs_to_jiffies(500)) < 0)
+ err = -ETIME;
+
+ i915_request_put(rq);
+
+ if (err)
+ drm_err(&gsc_uc_to_gt(gsc)->i915->drm,
+ "Request submission for GSC heci cmd failed (%d)\n",
+ err);
+
+ return err;
+}
+
+void intel_gsc_uc_heci_cmd_emit_mtl_header(struct intel_gsc_mtl_header *header,
+ u8 heci_client_id, u32 message_size,
+ u64 host_session_id)
+{
+ host_session_id &= ~HOST_SESSION_MASK;
+ if (heci_client_id == HECI_MEADDRESS_PXP)
+ host_session_id |= HOST_SESSION_PXP_SINGLE;
+
+ header->validity_marker = GSC_HECI_VALIDITY_MARKER;
+ header->heci_client_id = heci_client_id;
+ header->host_session_handle = host_session_id;
+ header->header_version = MTL_GSC_HEADER_VERSION;
+ header->message_size = message_size;
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h
new file mode 100644
index 000000000000..3d56ae501991
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _INTEL_GSC_UC_HECI_CMD_SUBMIT_H_
+#define _INTEL_GSC_UC_HECI_CMD_SUBMIT_H_
+
+#include <linux/types.h>
+
+struct intel_gsc_uc;
+struct intel_gsc_mtl_header {
+ u32 validity_marker;
+#define GSC_HECI_VALIDITY_MARKER 0xA578875A
+
+ u8 heci_client_id;
+#define HECI_MEADDRESS_PXP 17
+#define HECI_MEADDRESS_HDCP 18
+
+ u8 reserved1;
+
+ u16 header_version;
+#define MTL_GSC_HEADER_VERSION 1
+
+ /*
+ * FW allows host to decide host_session handle
+ * as it sees fit.
+ * For traceability, select bits (60-63) are reserved
+ * to differentiate the caller-target subsystem:
+ * 0000 - HDCP
+ * 0001 - PXP Single Session
+ */
+ u64 host_session_handle;
+#define HOST_SESSION_MASK REG_GENMASK64(63, 60)
+#define HOST_SESSION_PXP_SINGLE BIT_ULL(60)
+ u64 gsc_message_handle;
+
+ u32 message_size; /* lower 20 bits only, upper 12 are reserved */
+
+ /*
+ * Flags mask:
+ * Bit 0: Pending
+ * Bit 1: Session Cleanup;
+ * Bits 2-15: Flags
+ * Bits 16-31: Extension Size
+ * According to the internal spec, flags are either input or output;
+ * we distinguish them using the OUTFLAG or INFLAG prefix
+ */
+ u32 flags;
+#define GSC_OUTFLAG_MSG_PENDING 1
+
+ u32 status;
+} __packed;
+
+int intel_gsc_uc_heci_cmd_submit_packet(struct intel_gsc_uc *gsc,
+ u64 addr_in, u32 size_in,
+ u64 addr_out, u32 size_out);
+void intel_gsc_uc_heci_cmd_emit_mtl_header(struct intel_gsc_mtl_header *header,
+ u8 heci_client_id, u32 message_size,
+ u64 host_session_id);
+#endif
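For reference, a minimal sketch of how the two helpers declared above are meant to compose: fill an intel_gsc_mtl_header with intel_gsc_uc_heci_cmd_emit_mtl_header() and then pass the GGTT addresses of the input/output buffers to intel_gsc_uc_heci_cmd_submit_packet(). Only those two functions, the struct and HECI_MEADDRESS_PXP come from this patch; the caller name, buffer addresses and sizes below are hypothetical placeholders.

/*
 * Hypothetical caller sketch: hdr is assumed to sit at the start of a
 * GGTT-mapped input buffer of in_size bytes; out_ggtt/out_size describe
 * the reply buffer set up elsewhere by the caller.
 */
static int example_gsc_heci_send(struct intel_gsc_uc *gsc,
				 struct intel_gsc_mtl_header *hdr,
				 u64 in_ggtt, u32 in_size,
				 u64 out_ggtt, u32 out_size)
{
	/*
	 * Tag the message for the PXP HECI client; the helper also sets
	 * the PXP single-session bit in the host session handle.
	 */
	intel_gsc_uc_heci_cmd_emit_mtl_header(hdr, HECI_MEADDRESS_PXP,
					      in_size, 0);

	/* Emits GSC_HECI_CMD_PKT on the GSC engine and waits up to 500 ms */
	return intel_gsc_uc_heci_cmd_submit_packet(gsc, in_ggtt, in_size,
						   out_ggtt, out_size);
}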
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index fc3b994626a4..cf49188db6a6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -15,6 +15,7 @@
#include "guc_capture_fwif.h"
#include "intel_guc_capture.h"
#include "intel_guc_fwif.h"
+#include "intel_guc_print.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
@@ -353,7 +354,6 @@ guc_capture_alloc_steered_lists_xe_hpg(struct intel_guc *guc,
u32 ipver)
{
struct intel_gt *gt = guc_to_gt(guc);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct sseu_dev_info *sseu;
int slice, subslice, i, iter, num_steer_regs, num_tot_regs = 0;
const struct __guc_mmio_reg_descr_group *list;
@@ -402,7 +402,7 @@ guc_capture_alloc_steered_lists_xe_hpg(struct intel_guc *guc,
}
}
- drm_dbg(&i915->drm, "GuC-capture found %d-ext-regs.\n", num_tot_regs);
+ guc_dbg(guc, "capture found %d ext-regs.\n", num_tot_regs);
guc->capture->extlists = extlists;
}
@@ -477,7 +477,6 @@ guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
struct guc_mmio_reg *ptr, u16 num_entries)
{
u32 i = 0, j = 0;
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists;
struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists;
const struct __guc_mmio_reg_descr_group *match;
@@ -509,8 +508,7 @@ guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
}
}
if (i < num_entries)
- drm_dbg(&i915->drm, "GuC-capture: Init reglist short %d out %d.\n",
- (int)i, (int)num_entries);
+ guc_dbg(guc, "Got short capture reglist init: %d out %d.\n", i, num_entries);
return 0;
}
@@ -540,12 +538,11 @@ guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
size_t *size, bool is_purpose_est)
{
struct intel_guc_state_capture *gc = guc->capture;
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
int num_regs;
if (!gc->reglists) {
- drm_warn(&i915->drm, "GuC-capture: No reglist on this device\n");
+ guc_warn(guc, "No capture reglist for this device\n");
return -ENODEV;
}
@@ -557,9 +554,9 @@ guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
if (!is_purpose_est && owner == GUC_CAPTURE_LIST_INDEX_PF &&
!guc_capture_get_one_list(gc->reglists, owner, type, classid)) {
if (type == GUC_CAPTURE_LIST_TYPE_GLOBAL)
- drm_warn(&i915->drm, "Missing GuC-Err-Cap reglist Global!\n");
+ guc_warn(guc, "Missing capture reglist: global!\n");
else
- drm_warn(&i915->drm, "Missing GuC-Err-Cap reglist %s(%u):%s(%u)!\n",
+ guc_warn(guc, "Missing capture reglist: %s(%u):%s(%u)!\n",
__stringify_type(type), type,
__stringify_engclass(classid), classid);
return -ENODATA;
@@ -592,7 +589,6 @@ intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classi
{
struct intel_guc_state_capture *gc = guc->capture;
struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct guc_debug_capture_list *listnode;
int ret, num_regs;
u8 *caplist, *tmp;
@@ -623,7 +619,7 @@ intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classi
caplist = kzalloc(size, GFP_KERNEL);
if (!caplist) {
- drm_dbg(&i915->drm, "GuC-capture: failed to alloc cached caplist");
+ guc_dbg(guc, "Failed to alloc cached register capture list");
return -ENOMEM;
}
@@ -653,7 +649,6 @@ intel_guc_capture_getnullheader(struct intel_guc *guc,
void **outptr, size_t *size)
{
struct intel_guc_state_capture *gc = guc->capture;
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
int tmp = sizeof(u32) * 4;
void *null_header;
@@ -665,7 +660,7 @@ intel_guc_capture_getnullheader(struct intel_guc *guc,
null_header = kzalloc(tmp, GFP_KERNEL);
if (!null_header) {
- drm_dbg(&i915->drm, "GuC-capture: failed to alloc cached nulllist");
+ guc_dbg(guc, "Failed to alloc cached register capture null list");
return -ENOMEM;
}
@@ -727,7 +722,6 @@ guc_capture_output_min_size_est(struct intel_guc *guc)
static void check_guc_capture_size(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
int min_size = guc_capture_output_min_size_est(guc);
int spare_size = min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER;
u32 buffer_size = intel_guc_log_section_size_capture(&guc->log);
@@ -741,13 +735,13 @@ static void check_guc_capture_size(struct intel_guc *guc)
* INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE.
*/
if (min_size < 0)
- drm_warn(&i915->drm, "Failed to calculate GuC error state capture buffer minimum size: %d!\n",
+ guc_warn(guc, "Failed to calculate error state capture buffer minimum size: %d!\n",
min_size);
else if (min_size > buffer_size)
- drm_warn(&i915->drm, "GuC error state capture buffer maybe small: %d < %d\n",
+ guc_warn(guc, "Error state capture buffer maybe small: %d < %d\n",
buffer_size, min_size);
else if (spare_size > buffer_size)
- drm_dbg(&i915->drm, "GuC error state capture buffer lacks spare size: %d < %d (min = %d)\n",
+ guc_dbg(guc, "Error state capture buffer lacks spare size: %d < %d (min = %d)\n",
buffer_size, spare_size, min_size);
}
@@ -848,7 +842,6 @@ static int
guc_capture_log_remove_dw(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
u32 *dw)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
int tries = 2;
int avail = 0;
u32 *src_data;
@@ -865,7 +858,7 @@ guc_capture_log_remove_dw(struct intel_guc *guc, struct __guc_capture_bufstate *
return 4;
}
if (avail)
- drm_dbg(&i915->drm, "GuC-Cap-Logs not dword aligned, skipping.\n");
+ guc_dbg(guc, "Register capture log not dword aligned, skipping.\n");
buf->rd = 0;
}
@@ -1118,13 +1111,12 @@ static void
__guc_capture_create_prealloc_nodes(struct intel_guc *guc)
{
struct __guc_capture_parsed_output *node = NULL;
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
int i;
for (i = 0; i < PREALLOC_NODES_MAX_COUNT; ++i) {
node = guc_capture_alloc_one_node(guc);
if (!node) {
- drm_warn(&i915->drm, "GuC Capture pre-alloc-cache failure\n");
+ guc_warn(guc, "Register capture pre-alloc-cache failure\n");
/* dont free the priors, use what we got and cleanup at shutdown */
return;
}
@@ -1169,7 +1161,6 @@ guc_capture_create_prealloc_nodes(struct intel_guc *guc)
static int
guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstate *buf)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct guc_state_capture_group_header_t ghdr = {0};
struct guc_state_capture_header_t hdr = {0};
struct __guc_capture_parsed_output *node = NULL;
@@ -1183,7 +1174,7 @@ guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstat
if (!i)
return -ENODATA;
if (i % sizeof(u32)) {
- drm_warn(&i915->drm, "GuC Capture new entries unaligned\n");
+ guc_warn(guc, "Got mis-aligned register capture entries\n");
ret = -EIO;
goto bailout;
}
@@ -1301,7 +1292,7 @@ guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstat
break;
}
if (datatype != GUC_CAPTURE_LIST_TYPE_GLOBAL)
- drm_dbg(&i915->drm, "GuC Capture missing global dump: %08x!\n",
+ guc_dbg(guc, "Register capture missing global dump: %08x!\n",
datatype);
}
node->is_partial = is_partial;
@@ -1322,7 +1313,7 @@ guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstat
numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios);
if (numregs > guc->capture->max_mmio_per_node) {
- drm_dbg(&i915->drm, "GuC Capture list extraction clipped by prealloc!\n");
+ guc_dbg(guc, "Register capture list extraction clipped by prealloc!\n");
numregs = guc->capture->max_mmio_per_node;
}
node->reginfo[datatype].num_regs = numregs;
@@ -1367,7 +1358,6 @@ static void __guc_capture_process_output(struct intel_guc *guc)
{
unsigned int buffer_size, read_offset, write_offset, full_count;
struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct guc_log_buffer_state log_buf_state_local;
struct guc_log_buffer_state *log_buf_state;
struct __guc_capture_bufstate buf;
@@ -1403,7 +1393,8 @@ static void __guc_capture_process_output(struct intel_guc *guc)
write_offset = buffer_size;
} else if (unlikely((read_offset > buffer_size) ||
(write_offset > buffer_size))) {
- drm_err(&i915->drm, "invalid GuC log capture buffer state!\n");
+ guc_err(guc, "Register capture buffer in invalid state: read = 0x%X, size = 0x%X!\n",
+ read_offset, buffer_size);
/* copy whole buffer as offsets are unreliable */
read_offset = 0;
write_offset = buffer_size;
@@ -1571,6 +1562,27 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
#endif //CONFIG_DRM_I915_CAPTURE_ERROR
+static void guc_capture_find_ecode(struct intel_engine_coredump *ee)
+{
+ struct gcap_reg_list_info *reginfo;
+ struct guc_mmio_reg *regs;
+ i915_reg_t reg_ipehr = RING_IPEHR(0);
+ i915_reg_t reg_instdone = RING_INSTDONE(0);
+ int i;
+
+ if (!ee->guc_capture_node)
+ return;
+
+ reginfo = ee->guc_capture_node->reginfo + GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE;
+ regs = reginfo->regs;
+ for (i = 0; i < reginfo->num_regs; i++) {
+ if (regs[i].offset == reg_ipehr.reg)
+ ee->ipehr = regs[i].value;
+ else if (regs[i].offset == reg_instdone.reg)
+ ee->instdone.instdone = regs[i].value;
+ }
+}
+
void intel_guc_capture_free_node(struct intel_engine_coredump *ee)
{
if (!ee || !ee->guc_capture_node)
@@ -1586,13 +1598,11 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt,
struct intel_context *ce)
{
struct __guc_capture_parsed_output *n, *ntmp;
- struct drm_i915_private *i915;
struct intel_guc *guc;
if (!gt || !ee || !ce)
return;
- i915 = gt->i915;
guc = &gt->uc.guc;
if (!guc->capture)
return;
@@ -1606,16 +1616,18 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt,
list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) {
if (n->eng_inst == GUC_ID_TO_ENGINE_INSTANCE(ee->engine->guc_id) &&
n->eng_class == GUC_ID_TO_ENGINE_CLASS(ee->engine->guc_id) &&
- n->guc_id && n->guc_id == ce->guc_id.id &&
- (n->lrca & CTX_GTT_ADDRESS_MASK) && (n->lrca & CTX_GTT_ADDRESS_MASK) ==
- (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) {
+ n->guc_id == ce->guc_id.id &&
+ (n->lrca & CTX_GTT_ADDRESS_MASK) == (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) {
list_del(&n->link);
ee->guc_capture_node = n;
ee->guc_capture = guc->capture;
+ guc_capture_find_ecode(ee);
return;
}
}
- drm_dbg(&i915->drm, "GuC capture can't match ee to node\n");
+
+ guc_warn(guc, "No register capture node found for 0x%04X / 0x%08X\n",
+ ce->guc_id.id, ce->lrc.lrca);
}
void intel_guc_capture_process(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index c3792ddeec80..195db8c9d420 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -333,8 +333,7 @@ bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log,
log->stats[type].sampled_overflow += 16;
}
- dev_notice_ratelimited(guc_to_gt(log_to_guc(log))->i915->drm.dev,
- "GuC log buffer overflow\n");
+ guc_notice_ratelimited(log_to_guc(log), "log buffer overflow\n");
}
return overflow;
@@ -521,7 +520,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log)
static int guc_log_relay_create(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct rchan *guc_log_relay_chan;
size_t n_subbufs, subbuf_size;
int ret;
@@ -544,9 +543,9 @@ static int guc_log_relay_create(struct intel_guc_log *log)
n_subbufs = 8;
guc_log_relay_chan = relay_open("guc_log",
- dev_priv->drm.primary->debugfs_root,
+ i915->drm.primary->debugfs_root,
subbuf_size, n_subbufs,
- &relay_callbacks, dev_priv);
+ &relay_callbacks, i915);
if (!guc_log_relay_chan) {
guc_err(guc, "Couldn't create relay channel for logging\n");
@@ -571,7 +570,7 @@ static void guc_log_relay_destroy(struct intel_guc_log *log)
static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
_guc_log_copy_debuglogs_for_relay(log);
@@ -580,7 +579,7 @@ static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
* Generally device is expected to be active only at this
* time, so get/put should be really quick.
*/
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
guc_action_flush_log_complete(guc);
}
@@ -662,7 +661,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log)
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
int ret = 0;
@@ -676,12 +675,12 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
return -EINVAL;
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&i915->drm.struct_mutex);
if (log->level == level)
goto out_unlock;
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
ret = guc_action_control_log(guc,
GUC_LOG_LEVEL_IS_VERBOSE(level),
GUC_LOG_LEVEL_IS_ENABLED(level),
@@ -694,7 +693,7 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
log->level = level;
out_unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_print.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_print.h
index e75989d4ba06..2465d05638b4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_print.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_print.h
@@ -30,6 +30,9 @@
#define guc_err_ratelimited(_guc, _fmt, ...) \
guc_printk((_guc), err_ratelimited, _fmt, ##__VA_ARGS__)
+#define guc_notice_ratelimited(_guc, _fmt, ...) \
+ guc_printk((_guc), notice_ratelimited, _fmt, ##__VA_ARGS__)
+
#define guc_probe_error(_guc, _fmt, ...) \
guc_printk((_guc), probe_error, _fmt, ##__VA_ARGS__)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
index b5855091cf6a..1adec6de223c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
@@ -6,25 +6,15 @@
#include <linux/string_helpers.h>
#include "intel_guc_rc.h"
+#include "intel_guc_print.h"
#include "gt/intel_gt.h"
#include "i915_drv.h"
static bool __guc_rc_supported(struct intel_guc *guc)
{
- struct intel_gt *gt = guc_to_gt(guc);
-
- /*
- * Wa_14017073508: mtl
- * Do not enable gucrc to avoid additional interrupts which
- * may disrupt pcode wa.
- */
- if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
- gt->type == GT_MEDIA)
- return false;
-
/* GuC RC is unavailable for pre-Gen12 */
return guc->submission_supported &&
- GRAPHICS_VER(gt->i915) >= 12;
+ GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
}
static bool __guc_rc_selected(struct intel_guc *guc)
@@ -70,13 +60,12 @@ static int __guc_rc_control(struct intel_guc *guc, bool enable)
ret = guc_action_control_gucrc(guc, enable);
if (ret) {
- i915_probe_error(guc_to_gt(guc)->i915, "Failed to %s GuC RC (%pe)\n",
- str_enable_disable(enable), ERR_PTR(ret));
+ guc_probe_error(guc, "Failed to %s RC (%pe)\n",
+ str_enable_disable(enable), ERR_PTR(ret));
return ret;
}
- drm_info(&gt->i915->drm, "GuC RC: %s\n",
- str_enabled_disabled(enable));
+ guc_info(guc, "RC %s\n", str_enabled_disabled(enable));
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 63464933cbce..026d73855f36 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -9,6 +9,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_guc_slpc.h"
+#include "intel_guc_print.h"
#include "intel_mchbar_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"
@@ -171,14 +172,12 @@ static int guc_action_slpc_query(struct intel_guc *guc, u32 offset)
static int slpc_query_task_state(struct intel_guc_slpc *slpc)
{
struct intel_guc *guc = slpc_to_guc(slpc);
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
int ret;
ret = guc_action_slpc_query(guc, offset);
if (unlikely(ret))
- i915_probe_error(i915, "Failed to query task state (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(guc, "Failed to query task state: %pe\n", ERR_PTR(ret));
drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);
@@ -188,15 +187,14 @@ static int slpc_query_task_state(struct intel_guc_slpc *slpc)
static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
{
struct intel_guc *guc = slpc_to_guc(slpc);
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
int ret;
GEM_BUG_ON(id >= SLPC_MAX_PARAM);
ret = guc_action_slpc_set_param(guc, id, value);
if (ret)
- i915_probe_error(i915, "Failed to set param %d to %u (%pe)\n",
- id, value, ERR_PTR(ret));
+ guc_probe_error(guc, "Failed to set param %d to %u: %pe\n",
+ id, value, ERR_PTR(ret));
return ret;
}
@@ -212,8 +210,8 @@ static int slpc_unset_param(struct intel_guc_slpc *slpc, u8 id)
static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
{
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
struct intel_guc *guc = slpc_to_guc(slpc);
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
intel_wakeref_t wakeref;
int ret = 0;
@@ -236,9 +234,8 @@ static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
freq);
if (ret)
- drm_notice(&i915->drm,
- "Failed to send set_param for min freq(%d): (%d)\n",
- freq, ret);
+ guc_notice(guc, "Failed to send set_param for min freq(%d): %pe\n",
+ freq, ERR_PTR(ret));
}
return ret;
@@ -267,7 +264,6 @@ static void slpc_boost_work(struct work_struct *work)
int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
{
struct intel_guc *guc = slpc_to_guc(slpc);
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
int err;
@@ -275,9 +271,7 @@ int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
if (unlikely(err)) {
- i915_probe_error(i915,
- "Failed to allocate SLPC struct (err=%pe)\n",
- ERR_PTR(err));
+ guc_probe_error(guc, "Failed to allocate SLPC struct: %pe\n", ERR_PTR(err));
return err;
}
@@ -338,7 +332,6 @@ static int guc_action_slpc_reset(struct intel_guc *guc, u32 offset)
static int slpc_reset(struct intel_guc_slpc *slpc)
{
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
struct intel_guc *guc = slpc_to_guc(slpc);
u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
int ret;
@@ -346,15 +339,14 @@ static int slpc_reset(struct intel_guc_slpc *slpc)
ret = guc_action_slpc_reset(guc, offset);
if (unlikely(ret < 0)) {
- i915_probe_error(i915, "SLPC reset action failed (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(guc, "SLPC reset action failed: %pe\n", ERR_PTR(ret));
return ret;
}
if (!ret) {
if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
- i915_probe_error(i915, "SLPC not enabled! State = %s\n",
- slpc_get_state_string(slpc));
+ guc_probe_error(guc, "SLPC not enabled! State = %s\n",
+ slpc_get_state_string(slpc));
return -EIO;
}
}
@@ -495,8 +487,8 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
val < slpc->rp1_freq);
if (ret) {
- i915_probe_error(i915, "Failed to toggle efficient freq (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(slpc_to_guc(slpc), "Failed to toggle efficient freq: %pe\n",
+ ERR_PTR(ret));
goto out;
}
@@ -611,15 +603,12 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
static bool is_slpc_min_freq_rpmax(struct intel_guc_slpc *slpc)
{
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
int slpc_min_freq;
int ret;
ret = intel_guc_slpc_get_min_freq(slpc, &slpc_min_freq);
if (ret) {
- drm_err(&i915->drm,
- "Failed to get min freq: (%d)\n",
- ret);
+ guc_err(slpc_to_guc(slpc), "Failed to get min freq: %pe\n", ERR_PTR(ret));
return false;
}
@@ -685,9 +674,8 @@ int intel_guc_slpc_override_gucrc_mode(struct intel_guc_slpc *slpc, u32 mode)
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
ret = slpc_set_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
if (ret)
- drm_err(&i915->drm,
- "Override gucrc mode %d failed %d\n",
- mode, ret);
+ guc_err(slpc_to_guc(slpc), "Override RC mode %d failed: %pe\n",
+ mode, ERR_PTR(ret));
}
return ret;
@@ -702,9 +690,7 @@ int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc)
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
ret = slpc_unset_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE);
if (ret)
- drm_err(&i915->drm,
- "Unsetting gucrc mode failed %d\n",
- ret);
+ guc_err(slpc_to_guc(slpc), "Unsetting RC mode failed: %pe\n", ERR_PTR(ret));
}
return ret;
@@ -725,7 +711,7 @@ int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc)
*/
int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
{
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ struct intel_guc *guc = slpc_to_guc(slpc);
int ret;
GEM_BUG_ON(!slpc->vma);
@@ -734,8 +720,7 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
ret = slpc_reset(slpc);
if (unlikely(ret < 0)) {
- i915_probe_error(i915, "SLPC Reset event returned (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(guc, "SLPC Reset event returned: %pe\n", ERR_PTR(ret));
return ret;
}
@@ -743,7 +728,7 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
if (unlikely(ret < 0))
return ret;
- intel_guc_pm_intrmsk_enable(to_gt(i915));
+ intel_guc_pm_intrmsk_enable(slpc_to_gt(slpc));
slpc_get_rp_values(slpc);
@@ -753,16 +738,14 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
/* Set SLPC max limit to RP0 */
ret = slpc_use_fused_rp0(slpc);
if (unlikely(ret)) {
- i915_probe_error(i915, "Failed to set SLPC max to RP0 (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(guc, "Failed to set SLPC max to RP0: %pe\n", ERR_PTR(ret));
return ret;
}
/* Revert SLPC min/max to softlimits if necessary */
ret = slpc_set_softlimits(slpc);
if (unlikely(ret)) {
- i915_probe_error(i915, "Failed to set SLPC softlimits (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(guc, "Failed to set SLPC softlimits: %pe\n", ERR_PTR(ret));
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 53f3ed3244d5..88e881b100cf 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1352,6 +1352,16 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
return ns_to_ktime(total);
}
+static void guc_enable_busyness_worker(struct intel_guc *guc)
+{
+ mod_delayed_work(system_highpri_wq, &guc->timestamp.work, guc->timestamp.ping_delay);
+}
+
+static void guc_cancel_busyness_worker(struct intel_guc *guc)
+{
+ cancel_delayed_work_sync(&guc->timestamp.work);
+}
+
static void __reset_guc_busyness_stats(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -1360,7 +1370,7 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
unsigned long flags;
ktime_t unused;
- cancel_delayed_work_sync(&guc->timestamp.work);
+ guc_cancel_busyness_worker(guc);
spin_lock_irqsave(&guc->timestamp.lock, flags);
@@ -1416,8 +1426,7 @@ static void guc_timestamp_ping(struct work_struct *wrk)
intel_gt_reset_unlock(gt, srcu);
- mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
- guc->timestamp.ping_delay);
+ guc_enable_busyness_worker(guc);
}
static int guc_action_enable_usage_stats(struct intel_guc *guc)
@@ -1432,20 +1441,26 @@ static int guc_action_enable_usage_stats(struct intel_guc *guc)
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-static void guc_init_engine_stats(struct intel_guc *guc)
+static int guc_init_engine_stats(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
intel_wakeref_t wakeref;
+ int ret;
- mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
- guc->timestamp.ping_delay);
+ with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ ret = guc_action_enable_usage_stats(guc);
- with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) {
- int ret = guc_action_enable_usage_stats(guc);
+ if (ret)
+ guc_err(guc, "Failed to enable usage stats: %pe\n", ERR_PTR(ret));
+ else
+ guc_enable_busyness_worker(guc);
- if (ret)
- guc_err(guc, "Failed to enable usage stats: %pe\n", ERR_PTR(ret));
- }
+ return ret;
+}
+
+static void guc_fini_engine_stats(struct intel_guc *guc)
+{
+ guc_cancel_busyness_worker(guc);
}
void intel_guc_busyness_park(struct intel_gt *gt)
@@ -1460,7 +1475,7 @@ void intel_guc_busyness_park(struct intel_gt *gt)
* and causes an unclaimed register access warning. Cancel the worker
* synchronously here.
*/
- cancel_delayed_work_sync(&guc->timestamp.work);
+ guc_cancel_busyness_worker(guc);
/*
* Before parking, we should sample engine busyness stats if we need to.
@@ -1487,8 +1502,7 @@ void intel_guc_busyness_unpark(struct intel_gt *gt)
spin_lock_irqsave(&guc->timestamp.lock, flags);
guc_update_pm_timestamp(guc, &unused);
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
- mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
- guc->timestamp.ping_delay);
+ guc_enable_busyness_worker(guc);
}
static inline bool
@@ -4102,9 +4116,11 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
engine->submit_request = guc_submit_request;
}
-static inline void guc_kernel_context_pin(struct intel_guc *guc,
- struct intel_context *ce)
+static inline int guc_kernel_context_pin(struct intel_guc *guc,
+ struct intel_context *ce)
{
+ int ret;
+
/*
* Note: we purposefully do not check the returns below because
* the registration can only fail if a reset is just starting.
@@ -4112,16 +4128,24 @@ static inline void guc_kernel_context_pin(struct intel_guc *guc,
* isn't happening and even it did this code would be run again.
*/
- if (context_guc_id_invalid(ce))
- pin_guc_id(guc, ce);
+ if (context_guc_id_invalid(ce)) {
+ ret = pin_guc_id(guc, ce);
+
+ if (ret < 0)
+ return ret;
+ }
if (!test_bit(CONTEXT_GUC_INIT, &ce->flags))
guc_context_init(ce);
- try_context_registration(ce, true);
+ ret = try_context_registration(ce, true);
+ if (ret)
+ unpin_guc_id(guc, ce);
+
+ return ret;
}
-static inline void guc_init_lrc_mapping(struct intel_guc *guc)
+static inline int guc_init_submission(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
@@ -4148,9 +4172,17 @@ static inline void guc_init_lrc_mapping(struct intel_guc *guc)
struct intel_context *ce;
list_for_each_entry(ce, &engine->pinned_contexts_list,
- pinned_contexts_link)
- guc_kernel_context_pin(guc, ce);
+ pinned_contexts_link) {
+ int ret = guc_kernel_context_pin(guc, ce);
+
+ if (ret) {
+ /* No point in trying to clean up as i915 will wedge on failure */
+ return ret;
+ }
+ }
}
+
+ return 0;
}
static void guc_release(struct intel_engine_cs *engine)
@@ -4393,30 +4425,57 @@ static int guc_init_global_schedule_policy(struct intel_guc *guc)
return ret;
}
-void intel_guc_submission_enable(struct intel_guc *guc)
+static void guc_route_semaphores(struct intel_guc *guc, bool to_guc)
{
struct intel_gt *gt = guc_to_gt(guc);
+ u32 val;
- /* Enable and route to GuC */
- if (GRAPHICS_VER(gt->i915) >= 12)
- intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES,
- GUC_SEM_INTR_ROUTE_TO_GUC |
- GUC_SEM_INTR_ENABLE_ALL);
+ if (GRAPHICS_VER(gt->i915) < 12)
+ return;
+
+ if (to_guc)
+ val = GUC_SEM_INTR_ROUTE_TO_GUC | GUC_SEM_INTR_ENABLE_ALL;
+ else
+ val = 0;
- guc_init_lrc_mapping(guc);
- guc_init_engine_stats(guc);
- guc_init_global_schedule_policy(guc);
+ intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES, val);
}
-void intel_guc_submission_disable(struct intel_guc *guc)
+int intel_guc_submission_enable(struct intel_guc *guc)
{
- struct intel_gt *gt = guc_to_gt(guc);
+ int ret;
+
+ /* Semaphore interrupt enable and route to GuC */
+ guc_route_semaphores(guc, true);
- /* Note: By the time we're here, GuC may have already been reset */
+ ret = guc_init_submission(guc);
+ if (ret)
+ goto fail_sem;
+
+ ret = guc_init_engine_stats(guc);
+ if (ret)
+ goto fail_sem;
+
+ ret = guc_init_global_schedule_policy(guc);
+ if (ret)
+ goto fail_stats;
+
+ return 0;
+
+fail_stats:
+ guc_fini_engine_stats(guc);
+fail_sem:
+ guc_route_semaphores(guc, false);
+ return ret;
+}
+
+/* Note: By the time we're here, GuC may have already been reset */
+void intel_guc_submission_disable(struct intel_guc *guc)
+{
+ guc_cancel_busyness_worker(guc);
- /* Disable and route to host */
- if (GRAPHICS_VER(gt->i915) >= 12)
- intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES, 0x0);
+ /* Semaphore interrupt disable and route to host */
+ guc_route_semaphores(guc, false);
}
static bool __guc_submission_supported(struct intel_guc *guc)
@@ -4660,9 +4719,10 @@ static void guc_handle_context_reset(struct intel_guc *guc,
{
trace_intel_context_reset(ce);
- drm_dbg(&guc_to_gt(guc)->i915->drm, "Got GuC reset of 0x%04X, exiting = %d, banned = %d\n",
- ce->guc_id.id, test_bit(CONTEXT_EXITING, &ce->flags),
- test_bit(CONTEXT_BANNED, &ce->flags));
+ guc_dbg(guc, "Got context reset notification: 0x%04X on %s, exiting = %s, banned = %s\n",
+ ce->guc_id.id, ce->engine->name,
+ str_yes_no(intel_context_is_exiting(ce)),
+ str_yes_no(intel_context_is_banned(ce)));
if (likely(intel_context_is_schedulable(ce))) {
capture_error_state(guc, ce);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index 5a95a9f0a8e3..c57b29cdb1a6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -15,7 +15,7 @@ struct intel_engine_cs;
void intel_guc_submission_init_early(struct intel_guc *guc);
int intel_guc_submission_init(struct intel_guc *guc);
-void intel_guc_submission_enable(struct intel_guc *guc);
+int intel_guc_submission_enable(struct intel_guc *guc);
void intel_guc_submission_disable(struct intel_guc *guc);
void intel_guc_submission_fini(struct intel_guc *guc);
int intel_guc_preempt_work_create(struct intel_guc *guc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 410905da8e97..72884e21470b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include "gt/intel_gt.h"
+#include "gt/intel_gt_print.h"
#include "intel_guc_reg.h"
#include "intel_huc.h"
#include "i915_drv.h"
@@ -13,6 +14,15 @@
#include <linux/device/bus.h>
#include <linux/mei_aux.h>
+#define huc_printk(_huc, _level, _fmt, ...) \
+ gt_##_level(huc_to_gt(_huc), "HuC: " _fmt, ##__VA_ARGS__)
+#define huc_err(_huc, _fmt, ...) huc_printk((_huc), err, _fmt, ##__VA_ARGS__)
+#define huc_warn(_huc, _fmt, ...) huc_printk((_huc), warn, _fmt, ##__VA_ARGS__)
+#define huc_notice(_huc, _fmt, ...) huc_printk((_huc), notice, _fmt, ##__VA_ARGS__)
+#define huc_info(_huc, _fmt, ...) huc_printk((_huc), info, _fmt, ##__VA_ARGS__)
+#define huc_dbg(_huc, _fmt, ...) huc_printk((_huc), dbg, _fmt, ##__VA_ARGS__)
+#define huc_probe_error(_huc, _fmt, ...) huc_printk((_huc), probe_error, _fmt, ##__VA_ARGS__)
+
/**
* DOC: HuC
*
@@ -107,11 +117,9 @@ static enum hrtimer_restart huc_delayed_load_timer_callback(struct hrtimer *hrti
if (!intel_huc_is_authenticated(huc)) {
if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_GSC)
- drm_notice(&huc_to_gt(huc)->i915->drm,
- "timed out waiting for MEI GSC init to load HuC\n");
+ huc_notice(huc, "timed out waiting for MEI GSC\n");
else if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_PXP)
- drm_notice(&huc_to_gt(huc)->i915->drm,
- "timed out waiting for MEI PXP init to load HuC\n");
+ huc_notice(huc, "timed out waiting for MEI PXP\n");
else
MISSING_CASE(huc->delayed_load.status);
@@ -174,8 +182,7 @@ static int gsc_notifier(struct notifier_block *nb, unsigned long action, void *d
case BUS_NOTIFY_DRIVER_NOT_BOUND: /* mei driver fails to be bound */
case BUS_NOTIFY_UNBIND_DRIVER: /* mei driver about to be unbound */
- drm_info(&huc_to_gt(huc)->i915->drm,
- "mei driver not bound, disabling HuC load\n");
+ huc_info(huc, "MEI driver not bound, disabling load\n");
gsc_init_error(huc);
break;
}
@@ -193,8 +200,7 @@ void intel_huc_register_gsc_notifier(struct intel_huc *huc, struct bus_type *bus
huc->delayed_load.nb.notifier_call = gsc_notifier;
ret = bus_register_notifier(bus, &huc->delayed_load.nb);
if (ret) {
- drm_err(&huc_to_gt(huc)->i915->drm,
- "failed to register GSC notifier\n");
+ huc_err(huc, "failed to register GSC notifier %pe\n", ERR_PTR(ret));
huc->delayed_load.nb.notifier_call = NULL;
gsc_init_error(huc);
}
@@ -306,29 +312,25 @@ static int check_huc_loading_mode(struct intel_huc *huc)
GSC_LOADS_HUC;
if (fw_needs_gsc != hw_uses_gsc) {
- drm_err(&gt->i915->drm,
- "mismatch between HuC FW (%s) and HW (%s) load modes\n",
- HUC_LOAD_MODE_STRING(fw_needs_gsc),
- HUC_LOAD_MODE_STRING(hw_uses_gsc));
+ huc_err(huc, "mismatch between FW (%s) and HW (%s) load modes\n",
+ HUC_LOAD_MODE_STRING(fw_needs_gsc), HUC_LOAD_MODE_STRING(hw_uses_gsc));
return -ENOEXEC;
}
/* make sure we can access the GSC via the mei driver if we need it */
if (!(IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC)) &&
fw_needs_gsc) {
- drm_info(&gt->i915->drm,
- "Can't load HuC due to missing MEI modules\n");
+ huc_info(huc, "can't load due to missing MEI modules\n");
return -EIO;
}
- drm_dbg(&gt->i915->drm, "GSC loads huc=%s\n", str_yes_no(fw_needs_gsc));
+ huc_dbg(huc, "loaded by GSC = %s\n", str_yes_no(fw_needs_gsc));
return 0;
}
int intel_huc_init(struct intel_huc *huc)
{
- struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
int err;
err = check_huc_loading_mode(huc);
@@ -345,7 +347,7 @@ int intel_huc_init(struct intel_huc *huc)
out:
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
- drm_info(&i915->drm, "HuC init failed with %d\n", err);
+ huc_info(huc, "initialization failed %pe\n", ERR_PTR(err));
return err;
}
@@ -389,13 +391,13 @@ int intel_huc_wait_for_auth_complete(struct intel_huc *huc)
delayed_huc_load_complete(huc);
if (ret) {
- drm_err(&gt->i915->drm, "HuC: Firmware not verified %d\n", ret);
+ huc_err(huc, "firmware not verified %pe\n", ERR_PTR(ret));
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return ret;
}
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
- drm_info(&gt->i915->drm, "HuC authenticated\n");
+ huc_info(huc, "authenticated!\n");
return 0;
}
@@ -430,7 +432,7 @@ int intel_huc_auth(struct intel_huc *huc)
ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->fw.rsa_data));
if (ret) {
- DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
+ huc_err(huc, "authentication by GuC failed %pe\n", ERR_PTR(ret));
goto fail;
}
@@ -442,7 +444,7 @@ int intel_huc_auth(struct intel_huc *huc)
return 0;
fail:
- i915_probe_error(gt->i915, "HuC: Authentication failed %d\n", ret);
+ huc_probe_error(huc, "authentication failed %pe\n", ERR_PTR(ret));
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index de7f987cf611..4ccb4be4c9cb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -83,15 +83,15 @@ static int __intel_uc_reset_hw(struct intel_uc *uc)
static void __confirm_options(struct intel_uc *uc)
{
- struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+ struct intel_gt *gt = uc_to_gt(uc);
+ struct drm_i915_private *i915 = gt->i915;
- drm_dbg(&i915->drm,
- "enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
- i915->params.enable_guc,
- str_yes_no(intel_uc_wants_guc(uc)),
- str_yes_no(intel_uc_wants_guc_submission(uc)),
- str_yes_no(intel_uc_wants_huc(uc)),
- str_yes_no(intel_uc_wants_guc_slpc(uc)));
+ gt_dbg(gt, "enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
+ i915->params.enable_guc,
+ str_yes_no(intel_uc_wants_guc(uc)),
+ str_yes_no(intel_uc_wants_guc_submission(uc)),
+ str_yes_no(intel_uc_wants_huc(uc)),
+ str_yes_no(intel_uc_wants_guc_slpc(uc)));
if (i915->params.enable_guc == 0) {
GEM_BUG_ON(intel_uc_wants_guc(uc));
@@ -102,26 +102,22 @@ static void __confirm_options(struct intel_uc *uc)
}
if (!intel_uc_supports_guc(uc))
- drm_info(&i915->drm,
- "Incompatible option enable_guc=%d - %s\n",
- i915->params.enable_guc, "GuC is not supported!");
+ gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
+ i915->params.enable_guc, "GuC is not supported!");
if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC &&
!intel_uc_supports_huc(uc))
- drm_info(&i915->drm,
- "Incompatible option enable_guc=%d - %s\n",
- i915->params.enable_guc, "HuC is not supported!");
+ gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
+ i915->params.enable_guc, "HuC is not supported!");
if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
!intel_uc_supports_guc_submission(uc))
- drm_info(&i915->drm,
- "Incompatible option enable_guc=%d - %s\n",
- i915->params.enable_guc, "GuC submission is N/A");
+ gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
+ i915->params.enable_guc, "GuC submission is N/A");
if (i915->params.enable_guc & ~ENABLE_GUC_MASK)
- drm_info(&i915->drm,
- "Incompatible option enable_guc=%d - %s\n",
- i915->params.enable_guc, "undocumented flag");
+ gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
+ i915->params.enable_guc, "undocumented flag");
}
void intel_uc_init_early(struct intel_uc *uc)
@@ -143,6 +139,7 @@ void intel_uc_init_early(struct intel_uc *uc)
void intel_uc_init_late(struct intel_uc *uc)
{
intel_guc_init_late(&uc->guc);
+ intel_gsc_uc_load_start(&uc->gsc);
}
void intel_uc_driver_late_release(struct intel_uc *uc)
@@ -535,8 +532,11 @@ static int __uc_init_hw(struct intel_uc *uc)
else
intel_huc_auth(huc);
- if (intel_uc_uses_guc_submission(uc))
- intel_guc_submission_enable(guc);
+ if (intel_uc_uses_guc_submission(uc)) {
+ ret = intel_guc_submission_enable(guc);
+ if (ret)
+ goto err_log_capture;
+ }
if (intel_uc_uses_guc_slpc(uc)) {
ret = intel_guc_slpc_enable(&guc->slpc);
@@ -547,12 +547,8 @@ static int __uc_init_hw(struct intel_uc *uc)
intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
}
- intel_gsc_uc_load_start(&uc->gsc);
-
- gt_info(gt, "GuC submission %s\n",
- str_enabled_disabled(intel_uc_uses_guc_submission(uc)));
- gt_info(gt, "GuC SLPC %s\n",
- str_enabled_disabled(intel_uc_uses_guc_slpc(uc)));
+ guc_info(guc, "submission %s\n", str_enabled_disabled(intel_uc_uses_guc_submission(uc)));
+ guc_info(guc, "SLPC %s\n", str_enabled_disabled(intel_uc_uses_guc_slpc(uc)));
return 0;
@@ -678,7 +674,7 @@ void intel_uc_suspend(struct intel_uc *uc)
int err;
/* flush the GSC worker */
- intel_gsc_uc_suspend(&uc->gsc);
+ intel_gsc_uc_flush_work(&uc->gsc);
if (!intel_guc_is_ready(guc)) {
guc->interrupts.enabled = false;
@@ -720,6 +716,8 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
return err;
}
+ intel_gsc_uc_resume(&uc->gsc);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 65672ff82605..264c952f777b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -11,6 +11,7 @@
#include <drm/drm_print.h>
#include "gem/i915_gem_lmem.h"
+#include "gt/intel_gt_print.h"
#include "intel_uc_fw.h"
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"
@@ -44,11 +45,10 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
enum intel_uc_fw_status status)
{
uc_fw->__status = status;
- drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm,
- "%s firmware -> %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- status == INTEL_UC_FIRMWARE_SELECTED ?
- uc_fw->file_selected.path : intel_uc_fw_status_repr(status));
+ gt_dbg(__uc_fw_to_gt(uc_fw), "%s firmware -> %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ status == INTEL_UC_FIRMWARE_SELECTED ?
+ uc_fw->file_selected.path : intel_uc_fw_status_repr(status));
}
#endif
@@ -562,15 +562,14 @@ static int check_ccs_header(struct intel_gt *gt,
const struct firmware *fw,
struct intel_uc_fw *uc_fw)
{
- struct drm_i915_private *i915 = gt->i915;
struct uc_css_header *css;
size_t size;
/* Check the size of the blob before examining buffer contents */
if (unlikely(fw->size < sizeof(struct uc_css_header))) {
- drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- fw->size, sizeof(struct uc_css_header));
+ gt_warn(gt, "%s firmware %s: invalid size: %zu < %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ fw->size, sizeof(struct uc_css_header));
return -ENODATA;
}
@@ -580,10 +579,9 @@ static int check_ccs_header(struct intel_gt *gt,
size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
css->exponent_size_dw) * sizeof(u32);
if (unlikely(size != sizeof(struct uc_css_header))) {
- drm_warn(&i915->drm,
- "%s firmware %s: unexpected header size: %zu != %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- fw->size, sizeof(struct uc_css_header));
+ gt_warn(gt, "%s firmware %s: unexpected header size: %zu != %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ fw->size, sizeof(struct uc_css_header));
return -EPROTO;
}
@@ -596,18 +594,18 @@ static int check_ccs_header(struct intel_gt *gt,
/* At least, it should have header, uCode and RSA. Size of all three. */
size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
if (unlikely(fw->size < size)) {
- drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- fw->size, size);
+ gt_warn(gt, "%s firmware %s: invalid size: %zu < %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ fw->size, size);
return -ENOEXEC;
}
/* Sanity check whether this fw is not larger than whole WOPCM memory */
size = __intel_uc_fw_get_upload_size(uc_fw);
if (unlikely(size >= gt->wopcm.size)) {
- drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- size, (size_t)gt->wopcm.size);
+ gt_warn(gt, "%s firmware %s: invalid size: %zu > %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ size, (size_t)gt->wopcm.size);
return -E2BIG;
}
@@ -635,20 +633,20 @@ static bool guc_check_version_range(struct intel_uc_fw *uc_fw)
*/
if (!is_ver_8bit(&uc_fw->file_selected.ver)) {
- drm_warn(&__uc_fw_to_gt(uc_fw)->i915->drm, "%s firmware: invalid file version: 0x%02X:%02X:%02X\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->file_selected.ver.major,
- uc_fw->file_selected.ver.minor,
- uc_fw->file_selected.ver.patch);
+ gt_warn(__uc_fw_to_gt(uc_fw), "%s firmware: invalid file version: 0x%02X:%02X:%02X\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->file_selected.ver.major,
+ uc_fw->file_selected.ver.minor,
+ uc_fw->file_selected.ver.patch);
return false;
}
if (!is_ver_8bit(&guc->submission_version)) {
- drm_warn(&__uc_fw_to_gt(uc_fw)->i915->drm, "%s firmware: invalid submit version: 0x%02X:%02X:%02X\n",
- intel_uc_fw_type_repr(uc_fw->type),
- guc->submission_version.major,
- guc->submission_version.minor,
- guc->submission_version.patch);
+ gt_warn(__uc_fw_to_gt(uc_fw), "%s firmware: invalid submit version: 0x%02X:%02X:%02X\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ guc->submission_version.major,
+ guc->submission_version.minor,
+ guc->submission_version.patch);
return false;
}
@@ -687,10 +685,9 @@ static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware **
return err;
if ((*fw)->size > INTEL_UC_RSVD_GGTT_PER_FW) {
- drm_err(&gt->i915->drm,
- "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- (*fw)->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);
+ gt_err(gt, "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ (*fw)->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);
/* try to find another blob to load */
release_firmware(*fw);
@@ -768,10 +765,10 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
if (uc_fw->file_wanted.ver.major && uc_fw->file_selected.ver.major) {
/* Check the file's major version was as it claimed */
if (uc_fw->file_selected.ver.major != uc_fw->file_wanted.ver.major) {
- drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- uc_fw->file_selected.ver.major, uc_fw->file_selected.ver.minor,
- uc_fw->file_wanted.ver.major, uc_fw->file_wanted.ver.minor);
+ gt_notice(gt, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ uc_fw->file_selected.ver.major, uc_fw->file_selected.ver.minor,
+ uc_fw->file_wanted.ver.major, uc_fw->file_wanted.ver.minor);
if (!intel_uc_fw_is_overridden(uc_fw)) {
err = -ENOEXEC;
goto fail;
@@ -786,16 +783,14 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
/* Preserve the version that was really wanted */
memcpy(&uc_fw->file_wanted, &file_ideal, sizeof(uc_fw->file_wanted));
- drm_notice(&i915->drm,
- "%s firmware %s (%d.%d) is recommended, but only %s (%d.%d) was found\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->file_wanted.path,
- uc_fw->file_wanted.ver.major, uc_fw->file_wanted.ver.minor,
- uc_fw->file_selected.path,
- uc_fw->file_selected.ver.major, uc_fw->file_selected.ver.minor);
- drm_info(&i915->drm,
- "Consider updating your linux-firmware pkg or downloading from %s\n",
- INTEL_UC_FIRMWARE_URL);
+ gt_notice(gt, "%s firmware %s (%d.%d) is recommended, but only %s (%d.%d) was found\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->file_wanted.path,
+ uc_fw->file_wanted.ver.major, uc_fw->file_wanted.ver.minor,
+ uc_fw->file_selected.path,
+ uc_fw->file_selected.ver.major, uc_fw->file_selected.ver.minor);
+ gt_info(gt, "Consider updating your linux-firmware pkg or downloading from %s\n",
+ INTEL_UC_FIRMWARE_URL);
}
if (HAS_LMEM(i915)) {
@@ -823,10 +818,10 @@ fail:
INTEL_UC_FIRMWARE_MISSING :
INTEL_UC_FIRMWARE_ERROR);
- i915_probe_error(i915, "%s firmware %s: fetch failed with error %d\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, err);
- drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
- intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
+ gt_probe_error(gt, "%s firmware %s: fetch failed %pe\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, ERR_PTR(err));
+ gt_info(gt, "%s firmware(s) can be downloaded from %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
release_firmware(fw); /* OK even if fw is NULL */
return err;
@@ -932,9 +927,9 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
/* Wait for DMA to finish */
ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
if (ret)
- drm_err(&gt->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_uncore_read_fw(uncore, DMA_CTRL));
+ gt_err(gt, "DMA for %s fw failed, DMA_CTRL=%u\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ intel_uncore_read_fw(uncore, DMA_CTRL));
/* Disable the bits once DMA is over */
intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
@@ -950,9 +945,8 @@ int intel_uc_fw_mark_load_failed(struct intel_uc_fw *uc_fw, int err)
GEM_BUG_ON(!intel_uc_fw_is_loadable(uc_fw));
- i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- err);
+ gt_probe_error(gt, "Failed to load %s firmware %s %pe\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, ERR_PTR(err));
intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return err;
@@ -1078,15 +1072,15 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
err = i915_gem_object_pin_pages_unlocked(uc_fw->obj);
if (err) {
- DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
- intel_uc_fw_type_repr(uc_fw->type), err);
+ gt_dbg(__uc_fw_to_gt(uc_fw), "%s fw pin-pages failed %pe\n",
+ intel_uc_fw_type_repr(uc_fw->type), ERR_PTR(err));
goto out;
}
err = uc_fw_rsa_data_create(uc_fw);
if (err) {
- DRM_DEBUG_DRIVER("%s fw rsa data creation failed, err=%d\n",
- intel_uc_fw_type_repr(uc_fw->type), err);
+ gt_dbg(__uc_fw_to_gt(uc_fw), "%s fw rsa data creation failed %pe\n",
+ intel_uc_fw_type_repr(uc_fw->type), ERR_PTR(err));
goto out_unpin;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index e28518fe8b90..1fd760539f77 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -3,6 +3,8 @@
* Copyright © 2021 Intel Corporation
*/
+#include "gt/intel_gt_print.h"
+#include "intel_guc_print.h"
#include "selftests/igt_spinner.h"
#include "selftests/intel_scheduler_helpers.h"
@@ -65,7 +67,7 @@ static int intel_guc_scrub_ctbs(void *arg)
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
- drm_err(&gt->i915->drm, "Failed to create context, %d: %d\n", i, ret);
+ gt_err(gt, "Failed to create context %d: %pe\n", i, ce);
goto err;
}
@@ -86,7 +88,7 @@ static int intel_guc_scrub_ctbs(void *arg)
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- drm_err(&gt->i915->drm, "Failed to create request, %d: %d\n", i, ret);
+ gt_err(gt, "Failed to create request %d: %pe\n", i, rq);
goto err;
}
@@ -96,7 +98,7 @@ static int intel_guc_scrub_ctbs(void *arg)
for (i = 0; i < 3; ++i) {
ret = i915_request_wait(last[i], 0, HZ);
if (ret < 0) {
- drm_err(&gt->i915->drm, "Last request failed to complete: %d\n", ret);
+ gt_err(gt, "Last request failed to complete: %pe\n", ERR_PTR(ret));
goto err;
}
i915_request_put(last[i]);
@@ -113,7 +115,7 @@ static int intel_guc_scrub_ctbs(void *arg)
/* GT will not idle if G2H are lost */
ret = intel_gt_wait_for_idle(gt, HZ);
if (ret < 0) {
- drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
+ gt_err(gt, "GT failed to idle: %pe\n", ERR_PTR(ret));
goto err;
}
@@ -153,7 +155,7 @@ static int intel_guc_steal_guc_ids(void *arg)
ce = kcalloc(GUC_MAX_CONTEXT_ID, sizeof(*ce), GFP_KERNEL);
if (!ce) {
- drm_err(&gt->i915->drm, "Context array allocation failed\n");
+ guc_err(guc, "Context array allocation failed\n");
return -ENOMEM;
}
@@ -166,25 +168,25 @@ static int intel_guc_steal_guc_ids(void *arg)
ce[context_index] = intel_context_create(engine);
if (IS_ERR(ce[context_index])) {
ret = PTR_ERR(ce[context_index]);
+ guc_err(guc, "Failed to create context: %pe\n", ce[context_index]);
ce[context_index] = NULL;
- drm_err(&gt->i915->drm, "Failed to create context: %d\n", ret);
goto err_wakeref;
}
ret = igt_spinner_init(&spin, engine->gt);
if (ret) {
- drm_err(&gt->i915->drm, "Failed to create spinner: %d\n", ret);
+ guc_err(guc, "Failed to create spinner: %pe\n", ERR_PTR(ret));
goto err_contexts;
}
spin_rq = igt_spinner_create_request(&spin, ce[context_index],
MI_ARB_CHECK);
if (IS_ERR(spin_rq)) {
ret = PTR_ERR(spin_rq);
- drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+ guc_err(guc, "Failed to create spinner request: %pe\n", spin_rq);
goto err_contexts;
}
ret = request_add_spin(spin_rq, &spin);
if (ret) {
- drm_err(&gt->i915->drm, "Failed to add Spinner request: %d\n", ret);
+ guc_err(guc, "Failed to add Spinner request: %pe\n", ERR_PTR(ret));
goto err_spin_rq;
}
@@ -192,9 +194,9 @@ static int intel_guc_steal_guc_ids(void *arg)
while (ret != -EAGAIN) {
ce[++context_index] = intel_context_create(engine);
if (IS_ERR(ce[context_index])) {
- ret = PTR_ERR(ce[context_index--]);
- ce[context_index] = NULL;
- drm_err(&gt->i915->drm, "Failed to create context: %d\n", ret);
+ ret = PTR_ERR(ce[context_index]);
+ guc_err(guc, "Failed to create context: %pe\n", ce[context_index]);
+ ce[context_index--] = NULL;
goto err_spin_rq;
}
@@ -203,8 +205,8 @@ static int intel_guc_steal_guc_ids(void *arg)
ret = PTR_ERR(rq);
rq = NULL;
if (ret != -EAGAIN) {
- drm_err(&gt->i915->drm, "Failed to create request, %d: %d\n",
- context_index, ret);
+ guc_err(guc, "Failed to create request %d: %pe\n",
+ context_index, ERR_PTR(ret));
goto err_spin_rq;
}
} else {
@@ -218,7 +220,7 @@ static int intel_guc_steal_guc_ids(void *arg)
igt_spinner_end(&spin);
ret = intel_selftest_wait_for_rq(spin_rq);
if (ret) {
- drm_err(&gt->i915->drm, "Spin request failed to complete: %d\n", ret);
+ guc_err(guc, "Spin request failed to complete: %pe\n", ERR_PTR(ret));
i915_request_put(last);
goto err_spin_rq;
}
@@ -230,7 +232,7 @@ static int intel_guc_steal_guc_ids(void *arg)
ret = i915_request_wait(last, 0, HZ * 30);
i915_request_put(last);
if (ret < 0) {
- drm_err(&gt->i915->drm, "Last request failed to complete: %d\n", ret);
+ guc_err(guc, "Last request failed to complete: %pe\n", ERR_PTR(ret));
goto err_spin_rq;
}
@@ -238,7 +240,7 @@ static int intel_guc_steal_guc_ids(void *arg)
rq = nop_user_request(ce[context_index], NULL);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- drm_err(&gt->i915->drm, "Failed to steal guc_id, %d: %d\n", context_index, ret);
+ guc_err(guc, "Failed to steal guc_id %d: %pe\n", context_index, rq);
goto err_spin_rq;
}
@@ -246,20 +248,20 @@ static int intel_guc_steal_guc_ids(void *arg)
ret = i915_request_wait(rq, 0, HZ);
i915_request_put(rq);
if (ret < 0) {
- drm_err(&gt->i915->drm, "Request with stolen guc_id failed to complete: %d\n", ret);
+ guc_err(guc, "Request with stolen guc_id failed to complete: %pe\n", ERR_PTR(ret));
goto err_spin_rq;
}
/* Wait for idle */
ret = intel_gt_wait_for_idle(gt, HZ * 30);
if (ret < 0) {
- drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
+ guc_err(guc, "GT failed to idle: %pe\n", ERR_PTR(ret));
goto err_spin_rq;
}
/* Verify a guc_id was stolen */
if (guc->number_guc_id_stolen == number_guc_id_stolen) {
- drm_err(&gt->i915->drm, "No guc_id was stolen");
+ guc_err(guc, "No guc_id was stolen");
ret = -EINVAL;
} else {
ret = 0;
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
index d91b58f70403..34b5d952e2bc 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
@@ -3,6 +3,7 @@
* Copyright © 2022 Intel Corporation
*/
+#include "gt/intel_gt_print.h"
#include "selftests/igt_spinner.h"
#include "selftests/igt_reset.h"
#include "selftests/intel_scheduler_helpers.h"
@@ -45,7 +46,7 @@ static int intel_hang_guc(void *arg)
ctx = kernel_context(gt->i915, NULL);
if (IS_ERR(ctx)) {
- drm_err(&gt->i915->drm, "Failed get kernel context: %ld\n", PTR_ERR(ctx));
+ gt_err(gt, "Failed get kernel context: %pe\n", ctx);
return PTR_ERR(ctx);
}
@@ -54,7 +55,7 @@ static int intel_hang_guc(void *arg)
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
- drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+ gt_err(gt, "Failed to create spinner request: %pe\n", ce);
goto err;
}
@@ -63,13 +64,13 @@ static int intel_hang_guc(void *arg)
old_beat = engine->props.heartbeat_interval_ms;
ret = intel_engine_set_heartbeat(engine, BEAT_INTERVAL);
if (ret) {
- drm_err(&gt->i915->drm, "Failed to boost heatbeat interval: %d\n", ret);
+ gt_err(gt, "Failed to boost heatbeat interval: %pe\n", ERR_PTR(ret));
goto err;
}
ret = igt_spinner_init(&spin, engine->gt);
if (ret) {
- drm_err(&gt->i915->drm, "Failed to create spinner: %d\n", ret);
+ gt_err(gt, "Failed to create spinner: %pe\n", ERR_PTR(ret));
goto err;
}
@@ -77,28 +78,28 @@ static int intel_hang_guc(void *arg)
intel_context_put(ce);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+ gt_err(gt, "Failed to create spinner request: %pe\n", rq);
goto err_spin;
}
ret = request_add_spin(rq, &spin);
if (ret) {
i915_request_put(rq);
- drm_err(&gt->i915->drm, "Failed to add Spinner request: %d\n", ret);
+ gt_err(gt, "Failed to add Spinner request: %pe\n", ERR_PTR(ret));
goto err_spin;
}
ret = intel_reset_guc(gt);
if (ret) {
i915_request_put(rq);
- drm_err(&gt->i915->drm, "Failed to reset GuC, ret = %d\n", ret);
+ gt_err(gt, "Failed to reset GuC: %pe\n", ERR_PTR(ret));
goto err_spin;
}
guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
if (!(guc_status & GS_MIA_IN_RESET)) {
i915_request_put(rq);
- drm_err(&gt->i915->drm, "GuC failed to reset: status = 0x%08X\n", guc_status);
+ gt_err(gt, "Failed to reset GuC: status = 0x%08X\n", guc_status);
ret = -EIO;
goto err_spin;
}
@@ -107,12 +108,12 @@ static int intel_hang_guc(void *arg)
ret = intel_selftest_wait_for_rq(rq);
i915_request_put(rq);
if (ret) {
- drm_err(&gt->i915->drm, "Request failed to complete: %d\n", ret);
+ gt_err(gt, "Request failed to complete: %pe\n", ERR_PTR(ret));
goto err_spin;
}
if (i915_reset_count(global) == reset_count) {
- drm_err(&gt->i915->drm, "Failed to record a GPU reset\n");
+ gt_err(gt, "Failed to record a GPU reset\n");
ret = -EINVAL;
goto err_spin;
}
@@ -132,7 +133,7 @@ err_spin:
ret = intel_selftest_wait_for_rq(rq);
i915_request_put(rq);
if (ret) {
- drm_err(&gt->i915->drm, "No-op failed to complete: %d\n", ret);
+ gt_err(gt, "No-op failed to complete: %pe\n", ERR_PTR(ret));
goto err;
}
}
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
index d17982c36d25..a40e7c32e613 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
@@ -3,6 +3,7 @@
* Copyright © 2019 Intel Corporation
*/
+#include "gt/intel_gt_print.h"
#include "selftests/igt_spinner.h"
#include "selftests/igt_reset.h"
#include "selftests/intel_scheduler_helpers.h"
@@ -115,30 +116,30 @@ static int __intel_guc_multi_lrc_basic(struct intel_gt *gt, unsigned int class)
parent = multi_lrc_create_parent(gt, class, 0);
if (IS_ERR(parent)) {
- drm_err(&gt->i915->drm, "Failed creating contexts: %ld", PTR_ERR(parent));
+ gt_err(gt, "Failed creating contexts: %pe\n", parent);
return PTR_ERR(parent);
} else if (!parent) {
- drm_dbg(&gt->i915->drm, "Not enough engines in class: %d", class);
+ gt_dbg(gt, "Not enough engines in class: %d\n", class);
return 0;
}
rq = multi_lrc_nop_request(parent);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- drm_err(&gt->i915->drm, "Failed creating requests: %d", ret);
+ gt_err(gt, "Failed creating requests: %pe\n", rq);
goto out;
}
ret = intel_selftest_wait_for_rq(rq);
if (ret)
- drm_err(&gt->i915->drm, "Failed waiting on request: %d", ret);
+ gt_err(gt, "Failed waiting on request: %pe\n", ERR_PTR(ret));
i915_request_put(rq);
if (ret >= 0) {
ret = intel_gt_wait_for_idle(gt, HZ * 5);
if (ret < 0)
- drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
+ gt_err(gt, "GT failed to idle: %pe\n", ERR_PTR(ret));
}
out:
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 4d898b14de93..e0c5dfb788eb 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -63,7 +63,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
+ if (!(vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_EDP)) & TRANSCONF_ENABLE))
return 0;
if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
@@ -79,7 +79,7 @@ int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
pipe < PIPE_A || pipe >= I915_MAX_PIPES))
return -EINVAL;
- if (vgpu_vreg_t(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
+ if (vgpu_vreg_t(vgpu, TRANSCONF(pipe)) & TRANSCONF_ENABLE)
return 1;
if (edp_pipe_is_enabled(vgpu) &&
@@ -187,8 +187,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
GEN8_DE_PORT_HOTPLUG(HPD_PORT_C));
for_each_pipe(dev_priv, pipe) {
- vgpu_vreg_t(vgpu, PIPECONF(pipe)) &=
- ~(PIPECONF_ENABLE | PIPECONF_STATE_ENABLE);
+ vgpu_vreg_t(vgpu, TRANSCONF(pipe)) &=
+ ~(TRANSCONF_ENABLE | TRANSCONF_STATE_ENABLE);
vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISP_ENABLE;
vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE_MASK;
@@ -248,8 +248,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
* TRANSCODER_A can be enabled. PORT_x depends on the input of
* setup_virtual_dp_monitor.
*/
- vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
- vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_STATE_ENABLE;
+ vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_ENABLE;
+ vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_STATE_ENABLE;
/*
* Golden M/N are calculated based on:
@@ -506,7 +506,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE;
}
- vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
+ vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_ENABLE;
}
static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
@@ -584,7 +584,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
* @turnon: Turn ON/OFF vblank_timer
*
* This function is used to turn on/off or update the per-vGPU vblank_timer
- * when PIPECONF is enabled or disabled. vblank_timer period is also updated
+ * when TRANSCONF is enabled or disabled. vblank_timer period is also updated
* if guest changed the refresh rate.
*
*/
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 735fc83e7026..3c8e0d198c4f 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -666,8 +666,8 @@ static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A));
/* Get H/V total from transcoder timing */
- htotal = (vgpu_vreg_t(vgpu, HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
- vtotal = (vgpu_vreg_t(vgpu, VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);
+ htotal = (vgpu_vreg_t(vgpu, TRANS_HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
+ vtotal = (vgpu_vreg_t(vgpu, TRANS_VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);
if (dp_br && link_n && htotal && vtotal) {
u64 pixel_clk = 0;
@@ -697,12 +697,12 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- if (data & PIPECONF_ENABLE) {
- vgpu_vreg(vgpu, offset) |= PIPECONF_STATE_ENABLE;
+ if (data & TRANSCONF_ENABLE) {
+ vgpu_vreg(vgpu, offset) |= TRANSCONF_STATE_ENABLE;
vgpu_update_refresh_rate(vgpu);
vgpu_update_vblank_emulation(vgpu, true);
} else {
- vgpu_vreg(vgpu, offset) &= ~PIPECONF_STATE_ENABLE;
+ vgpu_vreg(vgpu, offset) &= ~TRANSCONF_STATE_ENABLE;
vgpu_update_vblank_emulation(vgpu, false);
}
return 0;
@@ -2262,10 +2262,10 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* display */
- MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write);
- MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write);
- MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write);
- MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(TRANSCONF(TRANSCODER_A), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(TRANSCONF(TRANSCODER_B), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(TRANSCONF(TRANSCODER_C), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(TRANSCONF(TRANSCODER_EDP), D_ALL, NULL, pipeconf_mmio_write);
MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 7412abf166a8..8ef93889061a 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -92,8 +92,7 @@ static void debug_active_init(struct i915_active *ref)
static void debug_active_activate(struct i915_active *ref)
{
lockdep_assert_held(&ref->tree_lock);
- if (!atomic_read(&ref->count)) /* before the first inc */
- debug_object_activate(ref, &active_debug_desc);
+ debug_object_activate(ref, &active_debug_desc);
}
static void debug_active_deactivate(struct i915_active *ref)
@@ -422,12 +421,12 @@ replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
* we can use it to substitute for the pending idle-barrer
* request that we want to emit on the kernel_context.
*/
- __active_del_barrier(ref, node_from_active(active));
- return true;
+ return __active_del_barrier(ref, node_from_active(active));
}
int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
{
+ u64 idx = i915_request_timeline(rq)->fence_context;
struct dma_fence *fence = &rq->fence;
struct i915_active_fence *active;
int err;
@@ -437,16 +436,19 @@ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
if (err)
return err;
- active = active_instance(ref, i915_request_timeline(rq)->fence_context);
- if (!active) {
- err = -ENOMEM;
- goto out;
- }
+ do {
+ active = active_instance(ref, idx);
+ if (!active) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (replace_barrier(ref, active)) {
+ RCU_INIT_POINTER(active->fence, NULL);
+ atomic_dec(&ref->count);
+ }
+ } while (unlikely(is_barrier(active)));
- if (replace_barrier(ref, active)) {
- RCU_INIT_POINTER(active->fence, NULL);
- atomic_dec(&ref->count);
- }
if (!__i915_active_fence_set(active, fence))
__i915_active_acquire(ref);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 45773ce1deac..16011c0286ad 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -52,7 +52,6 @@
#include "i915_irq.h"
#include "i915_scheduler.h"
#include "intel_mchbar_regs.h"
-#include "intel_pm.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index cf1c0970ecb4..da249337c23b 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -167,6 +167,8 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
pre |= IS_KABYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
pre |= IS_GEMINILAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x3;
pre |= IS_ICELAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x7;
+ pre |= IS_TIGERLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
+ pre |= IS_DG1(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
if (pre) {
drm_err(&dev_priv->drm, "This is a pre-production stepping. "
@@ -248,10 +250,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev_priv);
- intel_pm_setup(dev_priv);
- ret = intel_power_domains_init(dev_priv);
- if (ret < 0)
- goto err_gem;
intel_irq_init(dev_priv);
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
@@ -260,10 +258,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
return 0;
-err_gem:
- i915_gem_cleanup_early(dev_priv);
- intel_gt_driver_late_release_all(dev_priv);
- i915_drm_clients_fini(&dev_priv->clients);
err_rootgt:
intel_region_ttm_device_fini(dev_priv);
err_ttm:
@@ -489,13 +483,17 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret)
goto err_ggtt;
- ret = intel_memory_regions_hw_probe(dev_priv);
+ /*
+ * Make sure we probe lmem before we probe stolen-lmem. The BAR size
+ * might be different due to bar resizing.
+ */
+ ret = intel_gt_tiles_init(dev_priv);
if (ret)
goto err_ggtt;
- ret = intel_gt_tiles_init(dev_priv);
+ ret = intel_memory_regions_hw_probe(dev_priv);
if (ret)
- goto err_mem_regions;
+ goto err_ggtt;
ret = i915_ggtt_enable_hw(dev_priv);
if (ret) {
@@ -537,7 +535,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
ret = i915_pcode_init(dev_priv);
if (ret)
- goto err_msi;
+ goto err_opregion;
/*
* Fill the dram structure to get the system dram info. This will be
@@ -558,6 +556,8 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
return 0;
+err_opregion:
+ intel_opregion_cleanup(dev_priv);
err_msi:
if (pdev->msi_enabled)
pci_disable_msi(pdev);
@@ -583,6 +583,8 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
i915_perf_fini(dev_priv);
+ intel_opregion_cleanup(dev_priv);
+
if (pdev->msi_enabled)
pci_disable_msi(pdev);
@@ -936,7 +938,9 @@ static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
*/
static void i915_driver_lastclose(struct drm_device *dev)
{
- intel_fbdev_restore_mode(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
+
+ intel_fbdev_restore_mode(i915);
vga_switcheroo_process_delayed_switch();
}
@@ -1002,7 +1006,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_suspend_encoders(i915);
intel_shutdown_encoders(i915);
- intel_dmc_ucode_suspend(i915);
+ intel_dmc_suspend(i915);
i915_gem_suspend(i915);
@@ -1032,6 +1036,13 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
return false;
}
+static void i915_drm_complete(struct drm_device *dev)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+
+ intel_pxp_resume_complete(i915->pxp);
+}
+
static int i915_drm_prepare(struct drm_device *dev)
{
struct drm_i915_private *i915 = to_i915(dev);
@@ -1072,8 +1083,6 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_suspend_encoders(dev_priv);
- intel_suspend_hw(dev_priv);
-
/* Must be called before GGTT is suspended. */
intel_dpt_suspend(dev_priv);
i915_ggtt_suspend(to_gt(dev_priv)->ggtt);
@@ -1087,7 +1096,7 @@ static int i915_drm_suspend(struct drm_device *dev)
dev_priv->suspend_count++;
- intel_dmc_ucode_suspend(dev_priv);
+ intel_dmc_suspend(dev_priv);
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
@@ -1208,7 +1217,7 @@ static int i915_drm_resume(struct drm_device *dev)
/* Must be called after GGTT is resumed. */
intel_dpt_resume(dev_priv);
- intel_dmc_ucode_resume(dev_priv);
+ intel_dmc_resume(dev_priv);
i915_restore_display(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
@@ -1232,8 +1241,6 @@ static int i915_drm_resume(struct drm_device *dev)
i915_gem_resume(dev_priv);
- intel_pxp_resume(dev_priv->pxp);
-
intel_modeset_init_hw(dev_priv);
intel_init_clock_gating(dev_priv);
intel_hpd_init(dev_priv);
@@ -1425,6 +1432,16 @@ static int i915_pm_resume(struct device *kdev)
return i915_drm_resume(&i915->drm);
}
+static void i915_pm_complete(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return;
+
+ i915_drm_complete(&i915->drm);
+}
+
/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *kdev)
{
@@ -1645,6 +1662,7 @@ const struct dev_pm_ops i915_pm_ops = {
.suspend_late = i915_pm_suspend_late,
.resume_early = i915_pm_resume_early,
.resume = i915_pm_resume,
+ .complete = i915_pm_complete,
/*
* S4 event handlers
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4295306487c7..6254aa977398 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -580,6 +580,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N)
#define IS_ADLP_RPLP(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL)
+#define IS_ADLP_RPLU(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPLU)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
@@ -653,22 +655,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(IS_TIGERLAKE(__i915) && \
IS_DISPLAY_STEP(__i915, since, until))
-#define IS_TGL_UY_GRAPHICS_STEP(__i915, since, until) \
- (IS_TGL_UY(__i915) && \
- IS_GRAPHICS_STEP(__i915, since, until))
-
-#define IS_TGL_GRAPHICS_STEP(__i915, since, until) \
- (IS_TIGERLAKE(__i915) && !IS_TGL_UY(__i915)) && \
- IS_GRAPHICS_STEP(__i915, since, until))
-
#define IS_RKL_DISPLAY_STEP(p, since, until) \
(IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until))
-#define IS_DG1_GRAPHICS_STEP(p, since, until) \
- (IS_DG1(p) && IS_GRAPHICS_STEP(p, since, until))
-#define IS_DG1_DISPLAY_STEP(p, since, until) \
- (IS_DG1(p) && IS_DISPLAY_STEP(p, since, until))
-
#define IS_ADLS_DISPLAY_STEP(__i915, since, until) \
(IS_ALDERLAKE_S(__i915) && \
IS_DISPLAY_STEP(__i915, since, until))
@@ -876,7 +865,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
*/
#define HAS_64K_PAGES(dev_priv) (INTEL_INFO(dev_priv)->has_64k_pages)
-#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
+#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
+#define HAS_SAGV(dev_priv) (DISPLAY_VER(dev_priv) >= 9 && !IS_LP(dev_priv))
#define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i))
#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 904f21e1380c..f020c0086fbc 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -505,6 +505,7 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
ctx->guilty, ctx->active,
ctx->total_runtime, ctx->avg_runtime);
+ err_printf(m, " context timeline seqno %u\n", ctx->hwsp_seqno);
}
static struct i915_vma_coredump *
@@ -1395,6 +1396,8 @@ static bool record_context(struct i915_gem_context_coredump *e,
e->sched_attr = ctx->sched;
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
+ e->hwsp_seqno = (ce->timeline && ce->timeline->hwsp_seqno) ?
+ *ce->timeline->hwsp_seqno : ~0U;
e->total_runtime = intel_context_get_total_runtime_ns(ce);
e->avg_runtime = intel_context_get_avg_runtime_ns(ce);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 56027ffbce51..a91932cc6531 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -107,6 +107,7 @@ struct intel_engine_coredump {
int active;
int guilty;
struct i915_sched_attr sched_attr;
+ u32 hwsp_seqno;
} context;
struct i915_vma_coredump *vma;
diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c
index 1225bc432f0d..596dd2c07010 100644
--- a/drivers/gpu/drm/i915/i915_hwmon.c
+++ b/drivers/gpu/drm/i915/i915_hwmon.c
@@ -99,20 +99,6 @@ hwm_field_read_and_scale(struct hwm_drvdata *ddat, i915_reg_t rgadr,
return mul_u64_u32_shr(reg_value, scale_factor, nshift);
}
-static void
-hwm_field_scale_and_write(struct hwm_drvdata *ddat, i915_reg_t rgadr,
- int nshift, unsigned int scale_factor, long lval)
-{
- u32 nval;
-
- /* Computation in 64-bits to avoid overflow. Round to nearest. */
- nval = DIV_ROUND_CLOSEST_ULL((u64)lval << nshift, scale_factor);
-
- hwm_locked_with_pm_intel_uncore_rmw(ddat, rgadr,
- PKG_PWR_LIM_1,
- REG_FIELD_PREP(PKG_PWR_LIM_1, nval));
-}
-
/*
* hwm_energy - Obtain energy value
*
@@ -232,11 +218,15 @@ hwm_power1_max_interval_store(struct device *dev,
/* val in hw units */
val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME);
/* Convert to 1.x * power(2,y) */
- if (!val)
- return -EINVAL;
- y = ilog2(val);
- /* x = (val - (1 << y)) >> (y - 2); */
- x = (val - (1ul << y)) << x_w >> y;
+ if (!val) {
+ /* Avoid ilog2(0) */
+ y = 0;
+ x = 0;
+ } else {
+ y = ilog2(val);
+ /* x = (val - (1 << y)) >> (y - 2); */
+ x = (val - (1ul << y)) << x_w >> y;
+ }
rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
@@ -392,6 +382,22 @@ hwm_power_max_read(struct hwm_drvdata *ddat, long *val)
}
static int
+hwm_power_max_write(struct hwm_drvdata *ddat, long val)
+{
+ struct i915_hwmon *hwmon = ddat->hwmon;
+ u32 nval;
+
+ /* Computation in 64-bits to avoid overflow. Round to nearest. */
+ nval = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_power, SF_POWER);
+ nval = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, nval);
+
+ hwm_locked_with_pm_intel_uncore_rmw(ddat, hwmon->rg.pkg_rapl_limit,
+ PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1,
+ nval);
+ return 0;
+}
+
+static int
hwm_power_read(struct hwm_drvdata *ddat, u32 attr, int chan, long *val)
{
struct i915_hwmon *hwmon = ddat->hwmon;
@@ -425,16 +431,11 @@ hwm_power_read(struct hwm_drvdata *ddat, u32 attr, int chan, long *val)
static int
hwm_power_write(struct hwm_drvdata *ddat, u32 attr, int chan, long val)
{
- struct i915_hwmon *hwmon = ddat->hwmon;
u32 uval;
switch (attr) {
case hwmon_power_max:
- hwm_field_scale_and_write(ddat,
- hwmon->rg.pkg_rapl_limit,
- hwmon->scl_shift_power,
- SF_POWER, val);
- return 0;
+ return hwm_power_max_write(ddat, val);
case hwmon_power_crit:
uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_POWER);
return hwm_pcode_write_i1(ddat->uncore->i915, uval);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 240d5e198904..31271c30a8cf 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -52,7 +52,6 @@
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_irq.h"
-#include "intel_pm.h"
/**
* DOC: interrupt handling
@@ -81,8 +80,7 @@ static inline void pmu_irq_stats(struct drm_i915_private *i915,
}
typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
-typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
- enum hpd_pin pin);
+typedef u32 (*hotplug_enables_func)(struct intel_encoder *encoder);
static const u32 hpd_ilk[HPD_NUM_PINS] = {
[HPD_PORT_A] = DE_DP_A_HOTPLUG,
@@ -199,6 +197,8 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
hpd->hpd = hpd_gen11;
else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
hpd->hpd = hpd_bxt;
+ else if (DISPLAY_VER(dev_priv) == 9)
+ hpd->hpd = NULL; /* no north HPD on SKL */
else if (DISPLAY_VER(dev_priv) >= 8)
hpd->hpd = hpd_bdw;
else if (DISPLAY_VER(dev_priv) >= 7)
@@ -884,7 +884,7 @@ static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
u32 hotplug = 0;
for_each_intel_encoder(&i915->drm, encoder)
- hotplug |= hotplug_enables(i915, encoder->hpd_pin);
+ hotplug |= hotplug_enables(encoder);
return hotplug;
}
@@ -2835,10 +2835,11 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
-static u32 ibx_hotplug_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 ibx_hotplug_enables(struct intel_encoder *encoder)
{
- switch (pin) {
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ switch (encoder->hpd_pin) {
case HPD_PORT_A:
/*
* When CPU and PCH are on the same package, port A
@@ -2890,31 +2891,29 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
ibx_hpd_detection_setup(dev_priv);
}
-static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 icp_ddi_hotplug_enables(struct intel_encoder *encoder)
{
- switch (pin) {
+ switch (encoder->hpd_pin) {
case HPD_PORT_A:
case HPD_PORT_B:
case HPD_PORT_C:
case HPD_PORT_D:
- return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin);
+ return SHOTPLUG_CTL_DDI_HPD_ENABLE(encoder->hpd_pin);
default:
return 0;
}
}
-static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder)
{
- switch (pin) {
+ switch (encoder->hpd_pin) {
case HPD_PORT_TC1:
case HPD_PORT_TC2:
case HPD_PORT_TC3:
case HPD_PORT_TC4:
case HPD_PORT_TC5:
case HPD_PORT_TC6:
- return ICP_TC_HPD_ENABLE(pin);
+ return ICP_TC_HPD_ENABLE(encoder->hpd_pin);
default:
return 0;
}
@@ -2958,17 +2957,16 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
icp_tc_hpd_detection_setup(dev_priv);
}
-static u32 gen11_hotplug_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 gen11_hotplug_enables(struct intel_encoder *encoder)
{
- switch (pin) {
+ switch (encoder->hpd_pin) {
case HPD_PORT_TC1:
case HPD_PORT_TC2:
case HPD_PORT_TC3:
case HPD_PORT_TC4:
case HPD_PORT_TC5:
case HPD_PORT_TC6:
- return GEN11_HOTPLUG_CTL_ENABLE(pin);
+ return GEN11_HOTPLUG_CTL_ENABLE(encoder->hpd_pin);
default:
return 0;
}
@@ -3031,10 +3029,9 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
icp_hpd_irq_setup(dev_priv);
}
-static u32 spt_hotplug_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 spt_hotplug_enables(struct intel_encoder *encoder)
{
- switch (pin) {
+ switch (encoder->hpd_pin) {
case HPD_PORT_A:
return PORTA_HOTPLUG_ENABLE;
case HPD_PORT_B:
@@ -3048,10 +3045,9 @@ static u32 spt_hotplug_enables(struct drm_i915_private *i915,
}
}
-static u32 spt_hotplug2_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 spt_hotplug2_enables(struct intel_encoder *encoder)
{
- switch (pin) {
+ switch (encoder->hpd_pin) {
case HPD_PORT_E:
return PORTE_HOTPLUG_ENABLE;
default:
@@ -3094,10 +3090,9 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
spt_hpd_detection_setup(dev_priv);
}
-static u32 ilk_hotplug_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 ilk_hotplug_enables(struct intel_encoder *encoder)
{
- switch (pin) {
+ switch (encoder->hpd_pin) {
case HPD_PORT_A:
return DIGITAL_PORTA_HOTPLUG_ENABLE |
DIGITAL_PORTA_PULSE_DURATION_2ms;
@@ -3135,25 +3130,24 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
ibx_hpd_irq_setup(dev_priv);
}
-static u32 bxt_hotplug_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 bxt_hotplug_enables(struct intel_encoder *encoder)
{
u32 hotplug;
- switch (pin) {
+ switch (encoder->hpd_pin) {
case HPD_PORT_A:
hotplug = PORTA_HOTPLUG_ENABLE;
- if (intel_bios_is_port_hpd_inverted(i915, PORT_A))
+ if (intel_bios_encoder_hpd_invert(encoder->devdata))
hotplug |= BXT_DDIA_HPD_INVERT;
return hotplug;
case HPD_PORT_B:
hotplug = PORTB_HOTPLUG_ENABLE;
- if (intel_bios_is_port_hpd_inverted(i915, PORT_B))
+ if (intel_bios_encoder_hpd_invert(encoder->devdata))
hotplug |= BXT_DDIB_HPD_INVERT;
return hotplug;
case HPD_PORT_C:
hotplug = PORTC_HOTPLUG_ENABLE;
- if (intel_bios_is_port_hpd_inverted(i915, PORT_C))
+ if (intel_bios_encoder_hpd_invert(encoder->devdata))
hotplug |= BXT_DDIC_HPD_INVERT;
return hotplug;
default:
@@ -3471,15 +3465,33 @@ static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
dev_priv->irq_mask = ~0u;
}
+static u32 i9xx_error_mask(struct drm_i915_private *i915)
+{
+ /*
+ * On gen2/3 FBC generates (seemingly spurious)
+ * display INVALID_GTT/INVALID_GTT_PTE table errors.
+ *
+ * Also gen3 bspec has this to say:
+ * "DISPA_INVALID_GTT_PTE
+ " [DevNapa] : Reserved. This bit does not reflect the page
+ " table error for the display plane A."
+ *
+ * Unfortunately we can't mask off individual PGTBL_ER bits,
+ * so we just have to mask off all page table errors via EMR.
+ */
+ if (HAS_FBC(i915))
+ return ~I915_ERROR_MEMORY_REFRESH;
+ else
+ return ~(I915_ERROR_PAGE_TABLE |
+ I915_ERROR_MEMORY_REFRESH);
+}
+
static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
u16 enable_mask;
- intel_uncore_write16(uncore,
- EMR,
- ~(I915_ERROR_PAGE_TABLE |
- I915_ERROR_MEMORY_REFRESH));
+ intel_uncore_write16(uncore, EMR, i9xx_error_mask(dev_priv));
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
@@ -3510,9 +3522,7 @@ static void i8xx_error_irq_ack(struct drm_i915_private *i915,
u16 emr;
*eir = intel_uncore_read16(uncore, EIR);
-
- if (*eir)
- intel_uncore_write16(uncore, EIR, *eir);
+ intel_uncore_write16(uncore, EIR, *eir);
*eir_stuck = intel_uncore_read16(uncore, EIR);
if (*eir_stuck == 0)
@@ -3541,6 +3551,9 @@ static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
if (eir_stuck)
drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
eir_stuck);
+
+ drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
+ intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
}
static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
@@ -3548,7 +3561,8 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
{
u32 emr;
- *eir = intel_uncore_rmw(&dev_priv->uncore, EIR, 0, 0);
+ *eir = intel_uncore_read(&dev_priv->uncore, EIR);
+ intel_uncore_write(&dev_priv->uncore, EIR, *eir);
*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
if (*eir_stuck == 0)
@@ -3564,7 +3578,8 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
* (or by a GPU reset) so we mask any bit that
* remains set.
*/
- emr = intel_uncore_rmw(&dev_priv->uncore, EMR, ~0, 0xffffffff);
+ emr = intel_uncore_read(&dev_priv->uncore, EMR);
+ intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}
@@ -3576,6 +3591,9 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
if (eir_stuck)
drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
eir_stuck);
+
+ drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
+ intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
}
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
@@ -3645,8 +3663,7 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
- intel_uncore_write(uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
- I915_ERROR_MEMORY_REFRESH));
+ intel_uncore_write(uncore, EMR, i9xx_error_mask(dev_priv));
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
@@ -3749,26 +3766,31 @@ static void i965_irq_reset(struct drm_i915_private *dev_priv)
dev_priv->irq_mask = ~0u;
}
-static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
+static u32 i965_error_mask(struct drm_i915_private *i915)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u32 enable_mask;
- u32 error_mask;
-
/*
* Enable some error detection, note the instruction error mask
* bit is reserved, so we leave it masked.
+ *
+ * i965 FBC no longer generates spurious GTT errors,
+ * so we can always enable the page table errors.
*/
- if (IS_G4X(dev_priv)) {
- error_mask = ~(GM45_ERROR_PAGE_TABLE |
- GM45_ERROR_MEM_PRIV |
- GM45_ERROR_CP_PRIV |
- I915_ERROR_MEMORY_REFRESH);
- } else {
- error_mask = ~(I915_ERROR_PAGE_TABLE |
- I915_ERROR_MEMORY_REFRESH);
- }
- intel_uncore_write(uncore, EMR, error_mask);
+ if (IS_G4X(i915))
+ return ~(GM45_ERROR_PAGE_TABLE |
+ GM45_ERROR_MEM_PRIV |
+ GM45_ERROR_CP_PRIV |
+ I915_ERROR_MEMORY_REFRESH);
+ else
+ return ~(I915_ERROR_PAGE_TABLE |
+ I915_ERROR_MEMORY_REFRESH);
+}
+
+static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+ u32 enable_mask;
+
+ intel_uncore_write(uncore, EMR, i965_error_mask(dev_priv));
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 52531ab28c5f..a76c5ce9513d 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -17,7 +17,6 @@
#include "i915_drv.h"
#include "i915_pmu.h"
-#include "intel_pm.h"
/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3b2642397b82..d22ffd7a32dc 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -116,6 +116,9 @@
* #define GEN8_BAR _MMIO(0xb888)
*/
+#define GU_CNTL_PROTECTED _MMIO(0x10100C)
+#define DEPRESENT REG_BIT(9)
+
#define GU_CNTL _MMIO(0x101010)
#define LMEM_INIT REG_BIT(7)
#define DRIVERFLR REG_BIT(31)
@@ -541,9 +544,10 @@
#define _BXT_PHY0_BASE 0x6C000
#define _BXT_PHY1_BASE 0x162000
#define _BXT_PHY2_BASE 0x163000
-#define BXT_PHY_BASE(phy) _PHY3((phy), _BXT_PHY0_BASE, \
- _BXT_PHY1_BASE, \
- _BXT_PHY2_BASE)
+#define BXT_PHY_BASE(phy) \
+ _PICK_EVEN_2RANGES(phy, 1, \
+ _BXT_PHY0_BASE, _BXT_PHY0_BASE, \
+ _BXT_PHY1_BASE, _BXT_PHY2_BASE)
#define _BXT_PHY(phy, reg) \
_MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg))
@@ -566,13 +570,14 @@
#define BXT_PHY_CTL(port) _MMIO_PORT(port, _BXT_PHY_CTL_DDI_A, \
_BXT_PHY_CTL_DDI_B)
-#define _PHY_CTL_FAMILY_EDP 0x64C80
#define _PHY_CTL_FAMILY_DDI 0x64C90
+#define _PHY_CTL_FAMILY_EDP 0x64C80
#define _PHY_CTL_FAMILY_DDI_C 0x64CA0
#define COMMON_RESET_DIS (1 << 31)
-#define BXT_PHY_CTL_FAMILY(phy) _MMIO_PHY3((phy), _PHY_CTL_FAMILY_DDI, \
- _PHY_CTL_FAMILY_EDP, \
- _PHY_CTL_FAMILY_DDI_C)
+#define BXT_PHY_CTL_FAMILY(phy) \
+ _MMIO(_PICK_EVEN_2RANGES(phy, 1, \
+ _PHY_CTL_FAMILY_DDI, _PHY_CTL_FAMILY_DDI, \
+ _PHY_CTL_FAMILY_EDP, _PHY_CTL_FAMILY_DDI_C))
/* BXT PHY PLL registers */
#define _PORT_PLL_A 0x46074
@@ -1038,9 +1043,11 @@
#define _MBUS_ABOX0_CTL 0x45038
#define _MBUS_ABOX1_CTL 0x45048
#define _MBUS_ABOX2_CTL 0x4504C
-#define MBUS_ABOX_CTL(x) _MMIO(_PICK(x, _MBUS_ABOX0_CTL, \
- _MBUS_ABOX1_CTL, \
- _MBUS_ABOX2_CTL))
+#define MBUS_ABOX_CTL(x) \
+ _MMIO(_PICK_EVEN_2RANGES(x, 2, \
+ _MBUS_ABOX0_CTL, _MBUS_ABOX1_CTL, \
+ _MBUS_ABOX2_CTL, _MBUS_ABOX2_CTL))
+
#define MBUS_ABOX_BW_CREDIT_MASK (3 << 20)
#define MBUS_ABOX_BW_CREDIT(x) ((x) << 20)
#define MBUS_ABOX_B_CREDIT_MASK (0xF << 16)
@@ -1730,10 +1737,11 @@
#define PALETTE_10BIT_BLUE_EXP_MASK REG_GENMASK(7, 6)
#define PALETTE_10BIT_BLUE_MANT_MASK REG_GENMASK(5, 2)
#define PALETTE_10BIT_BLUE_UDW_MASK REG_GENMASK(1, 0)
-#define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
- _PICK((pipe), _PALETTE_A, \
- _PALETTE_B, _CHV_PALETTE_C) + \
- (i) * 4)
+#define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
+ _PICK_EVEN_2RANGES(pipe, 2, \
+ _PALETTE_A, _PALETTE_B, \
+ _CHV_PALETTE_C, _CHV_PALETTE_C) + \
+ (i) * 4)
#define PEG_BAND_GAP_DATA _MMIO(0x14d68)
@@ -1786,9 +1794,11 @@
* GEN9 clock gating regs
*/
#define GEN9_CLKGATE_DIS_0 _MMIO(0x46530)
-#define DARBF_GATING_DIS (1 << 27)
-#define PWM2_GATING_DIS (1 << 14)
-#define PWM1_GATING_DIS (1 << 13)
+#define DARBF_GATING_DIS REG_BIT(27)
+#define MTL_PIPEDMC_GATING_DIS_A REG_BIT(15)
+#define MTL_PIPEDMC_GATING_DIS_B REG_BIT(14)
+#define PWM2_GATING_DIS REG_BIT(14)
+#define PWM1_GATING_DIS REG_BIT(13)
#define GEN9_CLKGATE_DIS_3 _MMIO(0x46538)
#define TGL_VRH_GATING_DIS REG_BIT(31)
@@ -1906,48 +1916,72 @@
#define PIPE_CRC_RES_RES1_I915(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES1_A_I915)
#define PIPE_CRC_RES_RES2_G4X(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES2_A_G4X)
-/* Pipe A timing regs */
-#define _HTOTAL_A 0x60000
-#define _HBLANK_A 0x60004
-#define _HSYNC_A 0x60008
-#define _VTOTAL_A 0x6000c
-#define _VBLANK_A 0x60010
-#define _VSYNC_A 0x60014
-#define _EXITLINE_A 0x60018
-#define _PIPEASRC 0x6001c
+/* Pipe/transcoder A timing regs */
+#define _TRANS_HTOTAL_A 0x60000
+#define HTOTAL_MASK REG_GENMASK(31, 16)
+#define HTOTAL(htotal) REG_FIELD_PREP(HTOTAL_MASK, (htotal))
+#define HACTIVE_MASK REG_GENMASK(15, 0)
+#define HACTIVE(hdisplay) REG_FIELD_PREP(HACTIVE_MASK, (hdisplay))
+#define _TRANS_HBLANK_A 0x60004
+#define HBLANK_END_MASK REG_GENMASK(31, 16)
+#define HBLANK_END(hblank_end) REG_FIELD_PREP(HBLANK_END_MASK, (hblank_end))
+#define HBLANK_START_MASK REG_GENMASK(15, 0)
+#define HBLANK_START(hblank_start) REG_FIELD_PREP(HBLANK_START_MASK, (hblank_start))
+#define _TRANS_HSYNC_A 0x60008
+#define HSYNC_END_MASK REG_GENMASK(31, 16)
+#define HSYNC_END(hsync_end) REG_FIELD_PREP(HSYNC_END_MASK, (hsync_end))
+#define HSYNC_START_MASK REG_GENMASK(15, 0)
+#define HSYNC_START(hsync_start) REG_FIELD_PREP(HSYNC_START_MASK, (hsync_start))
+#define _TRANS_VTOTAL_A 0x6000c
+#define VTOTAL_MASK REG_GENMASK(31, 16)
+#define VTOTAL(vtotal) REG_FIELD_PREP(VTOTAL_MASK, (vtotal))
+#define VACTIVE_MASK REG_GENMASK(15, 0)
+#define VACTIVE(vdisplay) REG_FIELD_PREP(VACTIVE_MASK, (vdisplay))
+#define _TRANS_VBLANK_A 0x60010
+#define VBLANK_END_MASK REG_GENMASK(31, 16)
+#define VBLANK_END(vblank_end) REG_FIELD_PREP(VBLANK_END_MASK, (vblank_end))
+#define VBLANK_START_MASK REG_GENMASK(15, 0)
+#define VBLANK_START(vblank_start) REG_FIELD_PREP(VBLANK_START_MASK, (vblank_start))
+#define _TRANS_VSYNC_A 0x60014
+#define VSYNC_END_MASK REG_GENMASK(31, 16)
+#define VSYNC_END(vsync_end) REG_FIELD_PREP(VSYNC_END_MASK, (vsync_end))
+#define VSYNC_START_MASK REG_GENMASK(15, 0)
+#define VSYNC_START(vsync_start) REG_FIELD_PREP(VSYNC_START_MASK, (vsync_start))
+#define _TRANS_EXITLINE_A 0x60018
+#define _PIPEASRC 0x6001c
#define PIPESRC_WIDTH_MASK REG_GENMASK(31, 16)
#define PIPESRC_WIDTH(w) REG_FIELD_PREP(PIPESRC_WIDTH_MASK, (w))
#define PIPESRC_HEIGHT_MASK REG_GENMASK(15, 0)
#define PIPESRC_HEIGHT(h) REG_FIELD_PREP(PIPESRC_HEIGHT_MASK, (h))
-#define _BCLRPAT_A 0x60020
-#define _VSYNCSHIFT_A 0x60028
-#define _PIPE_MULT_A 0x6002c
-
-/* Pipe B timing regs */
-#define _HTOTAL_B 0x61000
-#define _HBLANK_B 0x61004
-#define _HSYNC_B 0x61008
-#define _VTOTAL_B 0x6100c
-#define _VBLANK_B 0x61010
-#define _VSYNC_B 0x61014
-#define _PIPEBSRC 0x6101c
-#define _BCLRPAT_B 0x61020
-#define _VSYNCSHIFT_B 0x61028
-#define _PIPE_MULT_B 0x6102c
+#define _BCLRPAT_A 0x60020
+#define _TRANS_VSYNCSHIFT_A 0x60028
+#define _TRANS_MULT_A 0x6002c
+
+/* Pipe/transcoder B timing regs */
+#define _TRANS_HTOTAL_B 0x61000
+#define _TRANS_HBLANK_B 0x61004
+#define _TRANS_HSYNC_B 0x61008
+#define _TRANS_VTOTAL_B 0x6100c
+#define _TRANS_VBLANK_B 0x61010
+#define _TRANS_VSYNC_B 0x61014
+#define _PIPEBSRC 0x6101c
+#define _BCLRPAT_B 0x61020
+#define _TRANS_VSYNCSHIFT_B 0x61028
+#define _TRANS_MULT_B 0x6102c
/* DSI 0 timing regs */
-#define _HTOTAL_DSI0 0x6b000
-#define _HSYNC_DSI0 0x6b008
-#define _VTOTAL_DSI0 0x6b00c
-#define _VSYNC_DSI0 0x6b014
-#define _VSYNCSHIFT_DSI0 0x6b028
+#define _TRANS_HTOTAL_DSI0 0x6b000
+#define _TRANS_HSYNC_DSI0 0x6b008
+#define _TRANS_VTOTAL_DSI0 0x6b00c
+#define _TRANS_VSYNC_DSI0 0x6b014
+#define _TRANS_VSYNCSHIFT_DSI0 0x6b028
/* DSI 1 timing regs */
-#define _HTOTAL_DSI1 0x6b800
-#define _HSYNC_DSI1 0x6b808
-#define _VTOTAL_DSI1 0x6b80c
-#define _VSYNC_DSI1 0x6b814
-#define _VSYNCSHIFT_DSI1 0x6b828
+#define _TRANS_HTOTAL_DSI1 0x6b800
+#define _TRANS_HSYNC_DSI1 0x6b808
+#define _TRANS_VTOTAL_DSI1 0x6b80c
+#define _TRANS_VSYNC_DSI1 0x6b814
+#define _TRANS_VSYNCSHIFT_DSI1 0x6b828
#define TRANSCODER_A_OFFSET 0x60000
#define TRANSCODER_B_OFFSET 0x61000
@@ -1958,18 +1992,18 @@
#define TRANSCODER_DSI0_OFFSET 0x6b000
#define TRANSCODER_DSI1_OFFSET 0x6b800
-#define HTOTAL(trans) _MMIO_TRANS2(trans, _HTOTAL_A)
-#define HBLANK(trans) _MMIO_TRANS2(trans, _HBLANK_A)
-#define HSYNC(trans) _MMIO_TRANS2(trans, _HSYNC_A)
-#define VTOTAL(trans) _MMIO_TRANS2(trans, _VTOTAL_A)
-#define VBLANK(trans) _MMIO_TRANS2(trans, _VBLANK_A)
-#define VSYNC(trans) _MMIO_TRANS2(trans, _VSYNC_A)
-#define BCLRPAT(trans) _MMIO_TRANS2(trans, _BCLRPAT_A)
-#define VSYNCSHIFT(trans) _MMIO_TRANS2(trans, _VSYNCSHIFT_A)
-#define PIPESRC(trans) _MMIO_TRANS2(trans, _PIPEASRC)
-#define PIPE_MULT(trans) _MMIO_TRANS2(trans, _PIPE_MULT_A)
-
-#define EXITLINE(trans) _MMIO_TRANS2(trans, _EXITLINE_A)
+#define TRANS_HTOTAL(trans) _MMIO_TRANS2((trans), _TRANS_HTOTAL_A)
+#define TRANS_HBLANK(trans) _MMIO_TRANS2((trans), _TRANS_HBLANK_A)
+#define TRANS_HSYNC(trans) _MMIO_TRANS2((trans), _TRANS_HSYNC_A)
+#define TRANS_VTOTAL(trans) _MMIO_TRANS2((trans), _TRANS_VTOTAL_A)
+#define TRANS_VBLANK(trans) _MMIO_TRANS2((trans), _TRANS_VBLANK_A)
+#define TRANS_VSYNC(trans) _MMIO_TRANS2((trans), _TRANS_VSYNC_A)
+#define BCLRPAT(trans) _MMIO_TRANS2((trans), _BCLRPAT_A)
+#define TRANS_VSYNCSHIFT(trans) _MMIO_TRANS2((trans), _TRANS_VSYNCSHIFT_A)
+#define PIPESRC(pipe) _MMIO_TRANS2((pipe), _PIPEASRC)
+#define TRANS_MULT(trans) _MMIO_TRANS2((trans), _TRANS_MULT_A)
+
+#define TRANS_EXITLINE(trans) _MMIO_TRANS2((trans), _TRANS_EXITLINE_A)
#define EXITLINE_ENABLE REG_BIT(31)
#define EXITLINE_MASK REG_GENMASK(12, 0)
#define EXITLINE_SHIFT 0
@@ -2266,110 +2300,6 @@
#define ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME REG_BIT(14)
#define ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(13)
-/* Icelake DSC Rate Control Range Parameter Registers */
-#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240)
-#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4)
-#define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40)
-#define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC (0x78508 + 4)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC)
-#define RC_BPG_OFFSET_SHIFT 10
-#define RC_MAX_QP_SHIFT 5
-#define RC_MIN_QP_SHIFT 0
-
-#define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248)
-#define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4)
-#define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48)
-#define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC)
-
-#define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250)
-#define DSCA_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6B250 + 4)
-#define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50)
-#define DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC)
-
-#define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258)
-#define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4)
-#define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58)
-#define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC)
-
/* VGA port control */
#define ADPA _MMIO(0x61100)
#define PCH_ADPA _MMIO(0xe1100)
@@ -2451,18 +2381,7 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define PORT_HOTPLUG_STAT _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61114)
-/*
- * HDMI/DP bits are g4x+
- *
- * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
- * Please check the detailed lore in the commit message for for experimental
- * evidence.
- */
-/* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */
-#define PORTD_HOTPLUG_LIVE_STATUS_GM45 (1 << 29)
-#define PORTC_HOTPLUG_LIVE_STATUS_GM45 (1 << 28)
-#define PORTB_HOTPLUG_LIVE_STATUS_GM45 (1 << 27)
-/* G4X/VLV/CHV DP/HDMI bits again match Bspec */
+/* HDMI/DP bits are g4x+ */
#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
@@ -2592,59 +2511,6 @@
#define SDVO_PIPE_SEL_MASK_CHV (3 << 24)
#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
-/* LVDS port control */
-#define LVDS _MMIO(0x61180)
-/*
- * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
- * the DPLL semantics change when the LVDS is assigned to that pipe.
- */
-#define LVDS_PORT_EN (1 << 31)
-/* Selects pipe B for LVDS data. Must be set on pre-965. */
-#define LVDS_PIPE_SEL_SHIFT 30
-#define LVDS_PIPE_SEL_MASK (1 << 30)
-#define LVDS_PIPE_SEL(pipe) ((pipe) << 30)
-#define LVDS_PIPE_SEL_SHIFT_CPT 29
-#define LVDS_PIPE_SEL_MASK_CPT (3 << 29)
-#define LVDS_PIPE_SEL_CPT(pipe) ((pipe) << 29)
-/* LVDS dithering flag on 965/g4x platform */
-#define LVDS_ENABLE_DITHER (1 << 25)
-/* LVDS sync polarity flags. Set to invert (i.e. negative) */
-#define LVDS_VSYNC_POLARITY (1 << 21)
-#define LVDS_HSYNC_POLARITY (1 << 20)
-
-/* Enable border for unscaled (or aspect-scaled) display */
-#define LVDS_BORDER_ENABLE (1 << 15)
-/*
- * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
- * pixel.
- */
-#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
-#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
-#define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
-/*
- * Controls the A3 data pair, which contains the additional LSBs for 24 bit
- * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
- * on.
- */
-#define LVDS_A3_POWER_MASK (3 << 6)
-#define LVDS_A3_POWER_DOWN (0 << 6)
-#define LVDS_A3_POWER_UP (3 << 6)
-/*
- * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
- * is set.
- */
-#define LVDS_CLKB_POWER_MASK (3 << 4)
-#define LVDS_CLKB_POWER_DOWN (0 << 4)
-#define LVDS_CLKB_POWER_UP (3 << 4)
-/*
- * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
- * setting for whether we are in dual-channel mode. The B3 pair will
- * additionally only be powered up when LVDS_A3_POWER_UP is set.
- */
-#define LVDS_B0B3_POWER_MASK (3 << 2)
-#define LVDS_B0B3_POWER_DOWN (0 << 2)
-#define LVDS_B0B3_POWER_UP (3 << 2)
-
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA _MMIO(0x61178)
/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
@@ -3492,61 +3358,61 @@
#define _PIPEADSL 0x70000
#define PIPEDSL_CURR_FIELD REG_BIT(31) /* ctg+ */
#define PIPEDSL_LINE_MASK REG_GENMASK(19, 0)
-#define _PIPEACONF 0x70008
-#define PIPECONF_ENABLE REG_BIT(31)
-#define PIPECONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */
-#define PIPECONF_STATE_ENABLE REG_BIT(30) /* i965+ */
-#define PIPECONF_DSI_PLL_LOCKED REG_BIT(29) /* vlv & pipe A only */
-#define PIPECONF_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* pre-hsw */
-#define PIPECONF_FRAME_START_DELAY(x) REG_FIELD_PREP(PIPECONF_FRAME_START_DELAY_MASK, (x)) /* pre-hsw: 0-3 */
-#define PIPECONF_PIPE_LOCKED REG_BIT(25)
-#define PIPECONF_FORCE_BORDER REG_BIT(25)
-#define PIPECONF_GAMMA_MODE_MASK_I9XX REG_BIT(24) /* gmch */
-#define PIPECONF_GAMMA_MODE_MASK_ILK REG_GENMASK(25, 24) /* ilk-ivb */
-#define PIPECONF_GAMMA_MODE_8BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK, 0)
-#define PIPECONF_GAMMA_MODE_10BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK, 1)
-#define PIPECONF_GAMMA_MODE_12BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, 2) /* ilk-ivb */
-#define PIPECONF_GAMMA_MODE_SPLIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, 3) /* ivb */
-#define PIPECONF_GAMMA_MODE(x) REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, (x)) /* pass in GAMMA_MODE_MODE_* */
-#define PIPECONF_INTERLACE_MASK REG_GENMASK(23, 21) /* gen3+ */
-#define PIPECONF_INTERLACE_PROGRESSIVE REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 0)
-#define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 4) /* gen4 only */
-#define PIPECONF_INTERLACE_W_SYNC_SHIFT REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 5) /* gen4 only */
-#define PIPECONF_INTERLACE_W_FIELD_INDICATION REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 6)
-#define PIPECONF_INTERLACE_FIELD_0_ONLY REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 7) /* gen3 only */
+#define _TRANSACONF 0x70008
+#define TRANSCONF_ENABLE REG_BIT(31)
+#define TRANSCONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */
+#define TRANSCONF_STATE_ENABLE REG_BIT(30) /* i965+ */
+#define TRANSCONF_DSI_PLL_LOCKED REG_BIT(29) /* vlv & pipe A only */
+#define TRANSCONF_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* pre-hsw */
+#define TRANSCONF_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANSCONF_FRAME_START_DELAY_MASK, (x)) /* pre-hsw: 0-3 */
+#define TRANSCONF_PIPE_LOCKED REG_BIT(25)
+#define TRANSCONF_FORCE_BORDER REG_BIT(25)
+#define TRANSCONF_GAMMA_MODE_MASK_I9XX REG_BIT(24) /* gmch */
+#define TRANSCONF_GAMMA_MODE_MASK_ILK REG_GENMASK(25, 24) /* ilk-ivb */
+#define TRANSCONF_GAMMA_MODE_8BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK, 0)
+#define TRANSCONF_GAMMA_MODE_10BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK, 1)
+#define TRANSCONF_GAMMA_MODE_12BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 2) /* ilk-ivb */
+#define TRANSCONF_GAMMA_MODE_SPLIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 3) /* ivb */
+#define TRANSCONF_GAMMA_MODE(x) REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, (x)) /* pass in GAMMA_MODE_MODE_* */
+#define TRANSCONF_INTERLACE_MASK REG_GENMASK(23, 21) /* gen3+ */
+#define TRANSCONF_INTERLACE_PROGRESSIVE REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 0)
+#define TRANSCONF_INTERLACE_W_SYNC_SHIFT_PANEL REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 4) /* gen4 only */
+#define TRANSCONF_INTERLACE_W_SYNC_SHIFT REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 5) /* gen4 only */
+#define TRANSCONF_INTERLACE_W_FIELD_INDICATION REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 6)
+#define TRANSCONF_INTERLACE_FIELD_0_ONLY REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 7) /* gen3 only */
/*
* ilk+: PF/D=progressive fetch/display, IF/D=interlaced fetch/display,
* DBL=power saving pixel doubling, PF-ID* requires panel fitter
*/
-#define PIPECONF_INTERLACE_MASK_ILK REG_GENMASK(23, 21) /* ilk+ */
-#define PIPECONF_INTERLACE_MASK_HSW REG_GENMASK(22, 21) /* hsw+ */
-#define PIPECONF_INTERLACE_PF_PD_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 0)
-#define PIPECONF_INTERLACE_PF_ID_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 1)
-#define PIPECONF_INTERLACE_IF_ID_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 3)
-#define PIPECONF_INTERLACE_IF_ID_DBL_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 4) /* ilk/snb only */
-#define PIPECONF_INTERLACE_PF_ID_DBL_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 5) /* ilk/snb only */
-#define PIPECONF_REFRESH_RATE_ALT_ILK REG_BIT(20)
-#define PIPECONF_MSA_TIMING_DELAY_MASK REG_GENMASK(19, 18) /* ilk/snb/ivb */
-#define PIPECONF_MSA_TIMING_DELAY(x) REG_FIELD_PREP(PIPECONF_MSA_TIMING_DELAY_MASK, (x))
-#define PIPECONF_CXSR_DOWNCLOCK REG_BIT(16)
-#define PIPECONF_REFRESH_RATE_ALT_VLV REG_BIT(14)
-#define PIPECONF_COLOR_RANGE_SELECT REG_BIT(13)
-#define PIPECONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */
-#define PIPECONF_OUTPUT_COLORSPACE_RGB REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 0) /* ilk-ivb */
-#define PIPECONF_OUTPUT_COLORSPACE_YUV601 REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 1) /* ilk-ivb */
-#define PIPECONF_OUTPUT_COLORSPACE_YUV709 REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 2) /* ilk-ivb */
-#define PIPECONF_OUTPUT_COLORSPACE_YUV_HSW REG_BIT(11) /* hsw only */
-#define PIPECONF_BPC_MASK REG_GENMASK(7, 5) /* ctg-ivb */
-#define PIPECONF_BPC_8 REG_FIELD_PREP(PIPECONF_BPC_MASK, 0)
-#define PIPECONF_BPC_10 REG_FIELD_PREP(PIPECONF_BPC_MASK, 1)
-#define PIPECONF_BPC_6 REG_FIELD_PREP(PIPECONF_BPC_MASK, 2)
-#define PIPECONF_BPC_12 REG_FIELD_PREP(PIPECONF_BPC_MASK, 3)
-#define PIPECONF_DITHER_EN REG_BIT(4)
-#define PIPECONF_DITHER_TYPE_MASK REG_GENMASK(3, 2)
-#define PIPECONF_DITHER_TYPE_SP REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 0)
-#define PIPECONF_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 1)
-#define PIPECONF_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 2)
-#define PIPECONF_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 3)
+#define TRANSCONF_INTERLACE_MASK_ILK REG_GENMASK(23, 21) /* ilk+ */
+#define TRANSCONF_INTERLACE_MASK_HSW REG_GENMASK(22, 21) /* hsw+ */
+#define TRANSCONF_INTERLACE_PF_PD_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 0)
+#define TRANSCONF_INTERLACE_PF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 1)
+#define TRANSCONF_INTERLACE_IF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 3)
+#define TRANSCONF_INTERLACE_IF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 4) /* ilk/snb only */
+#define TRANSCONF_INTERLACE_PF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 5) /* ilk/snb only */
+#define TRANSCONF_REFRESH_RATE_ALT_ILK REG_BIT(20)
+#define TRANSCONF_MSA_TIMING_DELAY_MASK REG_GENMASK(19, 18) /* ilk/snb/ivb */
+#define TRANSCONF_MSA_TIMING_DELAY(x) REG_FIELD_PREP(TRANSCONF_MSA_TIMING_DELAY_MASK, (x))
+#define TRANSCONF_CXSR_DOWNCLOCK REG_BIT(16)
+#define TRANSCONF_REFRESH_RATE_ALT_VLV REG_BIT(14)
+#define TRANSCONF_COLOR_RANGE_SELECT REG_BIT(13)
+#define TRANSCONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_RGB REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 0) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_YUV601 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 1) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_YUV709 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 2) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW REG_BIT(11) /* hsw only */
+#define TRANSCONF_BPC_MASK REG_GENMASK(7, 5) /* ctg-ivb */
+#define TRANSCONF_BPC_8 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 0)
+#define TRANSCONF_BPC_10 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 1)
+#define TRANSCONF_BPC_6 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 2)
+#define TRANSCONF_BPC_12 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 3)
+#define TRANSCONF_DITHER_EN REG_BIT(4)
+#define TRANSCONF_DITHER_TYPE_MASK REG_GENMASK(3, 2)
+#define TRANSCONF_DITHER_TYPE_SP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 0)
+#define TRANSCONF_DITHER_TYPE_ST1 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 1)
+#define TRANSCONF_DITHER_TYPE_ST2 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 2)
+#define TRANSCONF_DITHER_TYPE_TEMP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 3)
#define _PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31)
#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30)
@@ -3615,7 +3481,7 @@
#define PIPE_DSI0_OFFSET 0x7b000
#define PIPE_DSI1_OFFSET 0x7b800
-#define PIPECONF(pipe) _MMIO_PIPE2(pipe, _PIPEACONF)
+#define TRANSCONF(trans) _MMIO_PIPE2((trans), _TRANSACONF)
#define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL)
#define PIPEFRAME(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEHIGH)
#define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEPIXEL)
@@ -3631,36 +3497,38 @@
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
-#define PIPEMISC_YUV420_ENABLE REG_BIT(27) /* glk+ */
-#define PIPEMISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */
-#define PIPEMISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */
-#define PIPEMISC_OUTPUT_COLORSPACE_YUV REG_BIT(11)
-#define PIPEMISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
+#define PIPE_MISC_YUV420_ENABLE REG_BIT(27) /* glk+ */
+#define PIPE_MISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */
+#define PIPE_MISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */
+#define PIPE_MISC_OUTPUT_COLORSPACE_YUV REG_BIT(11)
+#define PIPE_MISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
/*
* For Display < 13, Bits 5-7 of PIPE MISC represent DITHER BPC with
* valid values of: 6, 8, 10 BPC.
* ADLP+, the bits 5-7 represent PORT OUTPUT BPC with valid values of:
* 6, 8, 10, 12 BPC.
*/
-#define PIPEMISC_BPC_MASK REG_GENMASK(7, 5)
-#define PIPEMISC_BPC_8 REG_FIELD_PREP(PIPEMISC_BPC_MASK, 0)
-#define PIPEMISC_BPC_10 REG_FIELD_PREP(PIPEMISC_BPC_MASK, 1)
-#define PIPEMISC_BPC_6 REG_FIELD_PREP(PIPEMISC_BPC_MASK, 2)
-#define PIPEMISC_BPC_12_ADLP REG_FIELD_PREP(PIPEMISC_BPC_MASK, 4) /* adlp+ */
-#define PIPEMISC_DITHER_ENABLE REG_BIT(4)
-#define PIPEMISC_DITHER_TYPE_MASK REG_GENMASK(3, 2)
-#define PIPEMISC_DITHER_TYPE_SP REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 0)
-#define PIPEMISC_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 1)
-#define PIPEMISC_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 2)
-#define PIPEMISC_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 3)
-#define PIPEMISC(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC_A)
+#define PIPE_MISC_BPC_MASK REG_GENMASK(7, 5)
+#define PIPE_MISC_BPC_8 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 0)
+#define PIPE_MISC_BPC_10 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 1)
+#define PIPE_MISC_BPC_6 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 2)
+#define PIPE_MISC_BPC_12_ADLP REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 4) /* adlp+ */
+#define PIPE_MISC_DITHER_ENABLE REG_BIT(4)
+#define PIPE_MISC_DITHER_TYPE_MASK REG_GENMASK(3, 2)
+#define PIPE_MISC_DITHER_TYPE_SP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 0)
+#define PIPE_MISC_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 1)
+#define PIPE_MISC_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 2)
+#define PIPE_MISC_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 3)
+#define PIPE_MISC(pipe) _MMIO_PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
#define _PIPE_MISC2_A 0x7002C
#define _PIPE_MISC2_B 0x7102C
#define PIPE_MISC2_BUBBLE_COUNTER_MASK REG_GENMASK(31, 24)
#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 80)
#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 20)
-#define PIPE_MISC2(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC2_A)
+#define PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK REG_GENMASK(2, 0) /* tgl+ */
+#define PIPE_MISC2_FLIP_INFO_PLANE_SEL(plane_id) REG_FIELD_PREP(PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK, (plane_id))
+#define PIPE_MISC2(pipe) _MMIO_PIPE(pipe, _PIPE_MISC2_A, _PIPE_MISC2_B)
/* Skylake+ pipe bottom (background) color */
#define _SKL_BOTTOM_COLOR_A 0x70034
@@ -4255,7 +4123,7 @@
/* Pipe B */
#define _PIPEBDSL (DISPLAY_MMIO_BASE(dev_priv) + 0x71000)
-#define _PIPEBCONF (DISPLAY_MMIO_BASE(dev_priv) + 0x71008)
+#define _TRANSBCONF (DISPLAY_MMIO_BASE(dev_priv) + 0x71008)
#define _PIPEBSTAT (DISPLAY_MMIO_BASE(dev_priv) + 0x71024)
#define _PIPEBFRAMEHIGH 0x71040
#define _PIPEBFRAMEPIXEL 0x71044
@@ -4526,6 +4394,7 @@
#define SP_CONST_ALPHA_ENABLE REG_BIT(31)
#define SP_CONST_ALPHA_MASK REG_GENMASK(7, 0)
#define SP_CONST_ALPHA(alpha) REG_FIELD_PREP(SP_CONST_ALPHA_MASK, (alpha))
+#define _SPASURFLIVE (VLV_DISPLAY_BASE + 0x721ac)
#define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0)
#define SP_CONTRAST_MASK REG_GENMASK(26, 18)
#define SP_CONTRAST(x) REG_FIELD_PREP(SP_CONTRAST_MASK, (x)) /* u3.6 */
@@ -4549,6 +4418,7 @@
#define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0)
#define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4)
#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
+#define _SPBSURFLIVE (VLV_DISPLAY_BASE + 0x722ac)
#define _SPBCLRC0 (VLV_DISPLAY_BASE + 0x722d0)
#define _SPBCLRC1 (VLV_DISPLAY_BASE + 0x722d4)
#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722e0)
@@ -4569,6 +4439,7 @@
#define SPKEYMAXVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
#define SPTILEOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF)
#define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA)
+#define SPSURFLIVE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURFLIVE, _SPBSURFLIVE)
#define SPCLRC0(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0)
#define SPCLRC1(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1)
#define SPGAMC(pipe, plane_id, i) _MMIO(_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) + (5 - (i)) * 4) /* 6 x u0.10 */
@@ -4720,10 +4591,13 @@
#define _PLANE_KEYVAL_2_A 0x70294
#define _PLANE_KEYMSK_1_A 0x70198
#define _PLANE_KEYMSK_2_A 0x70298
-#define PLANE_KEYMSK_ALPHA_ENABLE (1 << 31)
+#define PLANE_KEYMSK_ALPHA_ENABLE REG_BIT(31)
#define _PLANE_KEYMAX_1_A 0x701a0
#define _PLANE_KEYMAX_2_A 0x702a0
-#define PLANE_KEYMAX_ALPHA(a) ((a) << 24)
+#define PLANE_KEYMAX_ALPHA_MASK REG_GENMASK(31, 24)
+#define PLANE_KEYMAX_ALPHA(a) REG_FIELD_PREP(PLANE_KEYMAX_ALPHA_MASK, (a))
+#define _PLANE_SURFLIVE_1_A 0x701ac
+#define _PLANE_SURFLIVE_2_A 0x702ac
#define _PLANE_CC_VAL_1_A 0x701b4
#define _PLANE_CC_VAL_2_A 0x702b4
#define _PLANE_AUX_DIST_1_A 0x701c0
@@ -4908,6 +4782,13 @@
#define PLANE_KEYMAX(pipe, plane) \
_MMIO_PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe))
+#define _PLANE_SURFLIVE_1_B 0x711ac
+#define _PLANE_SURFLIVE_2_B 0x712ac
+#define _PLANE_SURFLIVE_1(pipe) _PIPE(pipe, _PLANE_SURFLIVE_1_A, _PLANE_SURFLIVE_1_B)
+#define _PLANE_SURFLIVE_2(pipe) _PIPE(pipe, _PLANE_SURFLIVE_2_A, _PLANE_SURFLIVE_2_B)
+#define PLANE_SURFLIVE(pipe, plane) \
+ _MMIO_PLANE(plane, _PLANE_SURFLIVE_1(pipe), _PLANE_SURFLIVE_2(pipe))
+
#define _PLANE_BUF_CFG_1_B 0x7127c
#define _PLANE_BUF_CFG_2_B 0x7137c
/* skl+: 10 bits, icl+ 11 bits, adlp+ 12 bits */
@@ -5432,6 +5313,7 @@
#define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28)
#define XELPD_PIPE_SOFT_UNDERRUN (1 << 22)
#define XELPD_PIPE_HARD_UNDERRUN (1 << 21)
+#define GEN12_PIPE_VBLANK_UNMOD (1 << 19)
#define GEN8_PIPE_CURSOR_FAULT (1 << 10)
#define GEN8_PIPE_SPRITE_FAULT (1 << 9)
#define GEN8_PIPE_PRIMARY_FAULT (1 << 8)
@@ -6392,9 +6274,6 @@
#define FDI_PLL_CTL_1 _MMIO(0xfe000)
#define FDI_PLL_CTL_2 _MMIO(0xfe004)
-#define PCH_LVDS _MMIO(0xe1180)
-#define LVDS_DETECTED (1 << 1)
-
#define _PCH_DP_B 0xe4100
#define PCH_DP_B _MMIO(_PCH_DP_B)
#define _PCH_DPB_AUX_CH_CTL 0xe4110
@@ -6596,15 +6475,6 @@
/* XEHP_PCODE_FREQUENCY_CONFIG param2 */
#define PCODE_MBOX_DOMAIN_NONE 0x0
#define PCODE_MBOX_DOMAIN_MEDIAFF 0x3
-
-/* Wa_14017210380: mtl */
-#define PCODE_MBOX_GT_STATE 0x50
-/* sub-commands (param1) */
-#define PCODE_MBOX_GT_STATE_MEDIA_BUSY 0x1
-#define PCODE_MBOX_GT_STATE_MEDIA_NOT_BUSY 0x2
-/* param2 */
-#define PCODE_MBOX_GT_STATE_DOMAIN_MEDIA 0x1
-
#define GEN6_PCODE_DATA _MMIO(0x138128)
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
@@ -7224,21 +7094,23 @@ enum skl_power_gate {
ADLS_DPCLKA_DDIK_SEL_MASK)
/* ICL PLL */
-#define DPLL0_ENABLE 0x46010
-#define DPLL1_ENABLE 0x46014
+#define _DPLL0_ENABLE 0x46010
+#define _DPLL1_ENABLE 0x46014
#define _ADLS_DPLL2_ENABLE 0x46018
#define _ADLS_DPLL3_ENABLE 0x46030
-#define PLL_ENABLE (1 << 31)
-#define PLL_LOCK (1 << 30)
-#define PLL_POWER_ENABLE (1 << 27)
-#define PLL_POWER_STATE (1 << 26)
-#define ICL_DPLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \
- _ADLS_DPLL2_ENABLE, _ADLS_DPLL3_ENABLE)
+#define PLL_ENABLE REG_BIT(31)
+#define PLL_LOCK REG_BIT(30)
+#define PLL_POWER_ENABLE REG_BIT(27)
+#define PLL_POWER_STATE REG_BIT(26)
+#define ICL_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \
+ _DPLL0_ENABLE, _DPLL1_ENABLE, \
+ _ADLS_DPLL3_ENABLE, _ADLS_DPLL3_ENABLE))
#define _DG2_PLL3_ENABLE 0x4601C
-#define DG2_PLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \
- _ADLS_DPLL2_ENABLE, _DG2_PLL3_ENABLE)
+#define DG2_PLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \
+ _DPLL0_ENABLE, _DPLL1_ENABLE, \
+ _DG2_PLL3_ENABLE, _DG2_PLL3_ENABLE))
#define TBT_PLL_ENABLE _MMIO(0x46020)
@@ -7246,13 +7118,14 @@ enum skl_power_gate {
#define _MG_PLL2_ENABLE 0x46034
#define _MG_PLL3_ENABLE 0x46038
#define _MG_PLL4_ENABLE 0x4603C
-/* Bits are the same as DPLL0_ENABLE */
+/* Bits are the same as _DPLL0_ENABLE */
#define MG_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), _MG_PLL1_ENABLE, \
_MG_PLL2_ENABLE)
/* DG1 PLL */
-#define DG1_DPLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \
- _MG_PLL1_ENABLE, _MG_PLL2_ENABLE)
+#define DG1_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _DPLL0_ENABLE, _DPLL1_ENABLE, \
+ _MG_PLL1_ENABLE, _MG_PLL2_ENABLE))
/* ADL-P Type C PLL */
#define PORTTC1_PLL_ENABLE 0x46038
@@ -7312,9 +7185,9 @@ enum skl_power_gate {
#define _TGL_DPLL0_CFGCR0 0x164284
#define _TGL_DPLL1_CFGCR0 0x16428C
#define _TGL_TBTPLL_CFGCR0 0x16429C
-#define TGL_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \
- _TGL_DPLL1_CFGCR0, \
- _TGL_TBTPLL_CFGCR0)
+#define TGL_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
+ _TGL_TBTPLL_CFGCR0, _TGL_TBTPLL_CFGCR0))
#define RKL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR0, \
_TGL_DPLL1_CFGCR0)
@@ -7327,40 +7200,36 @@ enum skl_power_gate {
#define _TGL_DPLL0_CFGCR1 0x164288
#define _TGL_DPLL1_CFGCR1 0x164290
#define _TGL_TBTPLL_CFGCR1 0x1642A0
-#define TGL_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \
- _TGL_DPLL1_CFGCR1, \
- _TGL_TBTPLL_CFGCR1)
+#define TGL_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
+ _TGL_TBTPLL_CFGCR1, _TGL_TBTPLL_CFGCR1))
#define RKL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR1, \
_TGL_DPLL1_CFGCR1)
#define _DG1_DPLL2_CFGCR0 0x16C284
#define _DG1_DPLL3_CFGCR0 0x16C28C
-#define DG1_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \
- _TGL_DPLL1_CFGCR0, \
- _DG1_DPLL2_CFGCR0, \
- _DG1_DPLL3_CFGCR0)
+#define DG1_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
+ _DG1_DPLL2_CFGCR0, _DG1_DPLL3_CFGCR0))
#define _DG1_DPLL2_CFGCR1 0x16C288
#define _DG1_DPLL3_CFGCR1 0x16C290
-#define DG1_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \
- _TGL_DPLL1_CFGCR1, \
- _DG1_DPLL2_CFGCR1, \
- _DG1_DPLL3_CFGCR1)
+#define DG1_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
+ _DG1_DPLL2_CFGCR1, _DG1_DPLL3_CFGCR1))
/* For ADL-S DPLL4_CFGCR0/1 are used to control DPLL2 */
-#define _ADLS_DPLL3_CFGCR0 0x1642C0
#define _ADLS_DPLL4_CFGCR0 0x164294
-#define ADLS_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \
- _TGL_DPLL1_CFGCR0, \
- _ADLS_DPLL4_CFGCR0, \
- _ADLS_DPLL3_CFGCR0)
+#define _ADLS_DPLL3_CFGCR0 0x1642C0
+#define ADLS_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
+ _ADLS_DPLL4_CFGCR0, _ADLS_DPLL3_CFGCR0))
-#define _ADLS_DPLL3_CFGCR1 0x1642C4
#define _ADLS_DPLL4_CFGCR1 0x164298
-#define ADLS_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \
- _TGL_DPLL1_CFGCR1, \
- _ADLS_DPLL4_CFGCR1, \
- _ADLS_DPLL3_CFGCR1)
+#define _ADLS_DPLL3_CFGCR1 0x1642C4
+#define ADLS_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
+ _ADLS_DPLL4_CFGCR1, _ADLS_DPLL3_CFGCR1))
/* BXT display engine PLL */
#define BXT_DE_PLL_CTL _MMIO(0x6d000)
@@ -7380,6 +7249,8 @@ enum skl_power_gate {
#define DC_STATE_DISABLE 0
#define DC_STATE_EN_DC3CO REG_BIT(30)
#define DC_STATE_DC3CO_STATUS REG_BIT(29)
+#define HOLD_PHY_CLKREQ_PG1_LATCH REG_BIT(21)
+#define HOLD_PHY_PG1_LATCH REG_BIT(20)
#define DC_STATE_EN_UPTO_DC5 (1 << 0)
#define DC_STATE_EN_DC9 (1 << 3)
#define DC_STATE_EN_UPTO_DC6 (2 << 0)
@@ -7689,47 +7560,29 @@ enum skl_power_gate {
#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT 12
#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK (0xf << 12)
+/* g4x+, except vlv/chv! */
#define _PIPE_FRMTMSTMP_A 0x70048
+#define _PIPE_FRMTMSTMP_B 0x71048
#define PIPE_FRMTMSTMP(pipe) \
- _MMIO_PIPE2(pipe, _PIPE_FRMTMSTMP_A)
-
-/* Display Stream Splitter Control */
-#define DSS_CTL1 _MMIO(0x67400)
-#define SPLITTER_ENABLE (1 << 31)
-#define JOINER_ENABLE (1 << 30)
-#define DUAL_LINK_MODE_INTERLEAVE (1 << 24)
-#define DUAL_LINK_MODE_FRONTBACK (0 << 24)
-#define OVERLAP_PIXELS_MASK (0xf << 16)
-#define OVERLAP_PIXELS(pixels) ((pixels) << 16)
-#define LEFT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
-#define LEFT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
-#define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0
-
-#define DSS_CTL2 _MMIO(0x67404)
-#define LEFT_BRANCH_VDSC_ENABLE (1 << 31)
-#define RIGHT_BRANCH_VDSC_ENABLE (1 << 15)
-#define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
-#define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
-
-#define _ICL_PIPE_DSS_CTL1_PB 0x78200
-#define _ICL_PIPE_DSS_CTL1_PC 0x78400
-#define ICL_PIPE_DSS_CTL1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_PIPE_DSS_CTL1_PB, \
- _ICL_PIPE_DSS_CTL1_PC)
-#define BIG_JOINER_ENABLE (1 << 29)
-#define MASTER_BIG_JOINER_ENABLE (1 << 28)
-#define VGA_CENTERING_ENABLE (1 << 27)
-#define SPLITTER_CONFIGURATION_MASK REG_GENMASK(26, 25)
-#define SPLITTER_CONFIGURATION_2_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 0)
-#define SPLITTER_CONFIGURATION_4_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 1)
-#define UNCOMPRESSED_JOINER_MASTER (1 << 21)
-#define UNCOMPRESSED_JOINER_SLAVE (1 << 20)
-
-#define _ICL_PIPE_DSS_CTL2_PB 0x78204
-#define _ICL_PIPE_DSS_CTL2_PC 0x78404
-#define ICL_PIPE_DSS_CTL2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_PIPE_DSS_CTL2_PB, \
- _ICL_PIPE_DSS_CTL2_PC)
+ _MMIO_PIPE(pipe, _PIPE_FRMTMSTMP_A, _PIPE_FRMTMSTMP_B)
+
+/* g4x+, except vlv/chv! */
+#define _PIPE_FLIPTMSTMP_A 0x7004C
+#define _PIPE_FLIPTMSTMP_B 0x7104C
+#define PIPE_FLIPTMSTMP(pipe) \
+ _MMIO_PIPE(pipe, _PIPE_FLIPTMSTMP_A, _PIPE_FLIPTMSTMP_B)
+
+/* tgl+ */
+#define _PIPE_FLIPDONETMSTMP_A 0x70054
+#define _PIPE_FLIPDONETMSTMP_B 0x71054
+#define PIPE_FLIPDONETIMSTMP(pipe) \
+ _MMIO_PIPE(pipe, _PIPE_FLIPDONETMSTMP_A, _PIPE_FLIPDONETMSTMP_B)
+
+#define _VLV_PIPE_MSA_MISC_A 0x70048
+#define VLV_PIPE_MSA_MISC(pipe) \
+ _MMIO_PIPE2(pipe, _VLV_PIPE_MSA_MISC_A)
+#define VLV_MSA_MISC1_HW_ENABLE REG_BIT(31)
+#define VLV_MSA_MISC1_SW_S3D_MASK REG_GENMASK(2, 0) /* MSA MISC1 3:1 */
#define GGC _MMIO(0x108040)
#define GMS_MASK REG_GENMASK(15, 8)
@@ -7754,314 +7607,6 @@ enum skl_power_gate {
#define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23)
#define DG2_PHY_DP_TX_ACK_MASK REG_GENMASK(23, 20)
-/* Icelake Display Stream Compression Registers */
-#define DSCA_PICTURE_PARAMETER_SET_0 _MMIO(0x6B200)
-#define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC 0x78570
-#define ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC)
-#define DSC_ALT_ICH_SEL (1 << 20)
-#define DSC_VBR_ENABLE (1 << 19)
-#define DSC_422_ENABLE (1 << 18)
-#define DSC_COLOR_SPACE_CONVERSION (1 << 17)
-#define DSC_BLOCK_PREDICTION (1 << 16)
-#define DSC_LINE_BUF_DEPTH_SHIFT 12
-#define DSC_BPC_SHIFT 8
-#define DSC_VER_MIN_SHIFT 4
-#define DSC_VER_MAJ (0x1 << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_1 _MMIO(0x6B204)
-#define DSCC_PICTURE_PARAMETER_SET_1 _MMIO(0x6BA04)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB 0x78274
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB 0x78374
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC 0x78474
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC 0x78574
-#define ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC)
-#define DSC_BPP(bpp) ((bpp) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_2 _MMIO(0x6B208)
-#define DSCC_PICTURE_PARAMETER_SET_2 _MMIO(0x6BA08)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB 0x78278
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB 0x78378
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC 0x78478
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC 0x78578
-#define ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC)
-#define DSC_PIC_WIDTH(pic_width) ((pic_width) << 16)
-#define DSC_PIC_HEIGHT(pic_height) ((pic_height) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_3 _MMIO(0x6B20C)
-#define DSCC_PICTURE_PARAMETER_SET_3 _MMIO(0x6BA0C)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB 0x7827C
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB 0x7837C
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC 0x7847C
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC 0x7857C
-#define ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC)
-#define DSC_SLICE_WIDTH(slice_width) ((slice_width) << 16)
-#define DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_4 _MMIO(0x6B210)
-#define DSCC_PICTURE_PARAMETER_SET_4 _MMIO(0x6BA10)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB 0x78280
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB 0x78380
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC 0x78480
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC 0x78580
-#define ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC)
-#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16)
-#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_5 _MMIO(0x6B214)
-#define DSCC_PICTURE_PARAMETER_SET_5 _MMIO(0x6BA14)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB 0x78284
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB 0x78384
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC 0x78484
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC 0x78584
-#define ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC)
-#define DSC_SCALE_DEC_INT(scale_dec) ((scale_dec) << 16)
-#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_6 _MMIO(0x6B218)
-#define DSCC_PICTURE_PARAMETER_SET_6 _MMIO(0x6BA18)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB 0x78288
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB 0x78388
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC 0x78488
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC 0x78588
-#define ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC)
-#define DSC_FLATNESS_MAX_QP(max_qp) ((max_qp) << 24)
-#define DSC_FLATNESS_MIN_QP(min_qp) ((min_qp) << 16)
-#define DSC_FIRST_LINE_BPG_OFFSET(offset) ((offset) << 8)
-#define DSC_INITIAL_SCALE_VALUE(value) ((value) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_7 _MMIO(0x6B21C)
-#define DSCC_PICTURE_PARAMETER_SET_7 _MMIO(0x6BA1C)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB 0x7828C
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB 0x7838C
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC 0x7848C
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC 0x7858C
-#define ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC)
-#define DSC_NFL_BPG_OFFSET(bpg_offset) ((bpg_offset) << 16)
-#define DSC_SLICE_BPG_OFFSET(bpg_offset) ((bpg_offset) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_8 _MMIO(0x6B220)
-#define DSCC_PICTURE_PARAMETER_SET_8 _MMIO(0x6BA20)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB 0x78290
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB 0x78390
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC 0x78490
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC 0x78590
-#define ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC)
-#define DSC_INITIAL_OFFSET(initial_offset) ((initial_offset) << 16)
-#define DSC_FINAL_OFFSET(final_offset) ((final_offset) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_9 _MMIO(0x6B224)
-#define DSCC_PICTURE_PARAMETER_SET_9 _MMIO(0x6BA24)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB 0x78294
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB 0x78394
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC 0x78494
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC 0x78594
-#define ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC)
-#define DSC_RC_EDGE_FACTOR(rc_edge_fact) ((rc_edge_fact) << 16)
-#define DSC_RC_MODEL_SIZE(rc_model_size) ((rc_model_size) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_10 _MMIO(0x6B228)
-#define DSCC_PICTURE_PARAMETER_SET_10 _MMIO(0x6BA28)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB 0x78298
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB 0x78398
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC 0x78498
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC 0x78598
-#define ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC)
-#define DSC_RC_TARGET_OFF_LOW(rc_tgt_off_low) ((rc_tgt_off_low) << 20)
-#define DSC_RC_TARGET_OFF_HIGH(rc_tgt_off_high) ((rc_tgt_off_high) << 16)
-#define DSC_RC_QUANT_INC_LIMIT1(lim) ((lim) << 8)
-#define DSC_RC_QUANT_INC_LIMIT0(lim) ((lim) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_11 _MMIO(0x6B22C)
-#define DSCC_PICTURE_PARAMETER_SET_11 _MMIO(0x6BA2C)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB 0x7829C
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB 0x7839C
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC 0x7849C
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC 0x7859C
-#define ICL_DSC0_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC)
-
-#define DSCA_PICTURE_PARAMETER_SET_12 _MMIO(0x6B260)
-#define DSCC_PICTURE_PARAMETER_SET_12 _MMIO(0x6BA60)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB 0x782A0
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB 0x783A0
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC 0x784A0
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC 0x785A0
-#define ICL_DSC0_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC)
-
-#define DSCA_PICTURE_PARAMETER_SET_13 _MMIO(0x6B264)
-#define DSCC_PICTURE_PARAMETER_SET_13 _MMIO(0x6BA64)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB 0x782A4
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB 0x783A4
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC 0x784A4
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC 0x785A4
-#define ICL_DSC0_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC)
-
-#define DSCA_PICTURE_PARAMETER_SET_14 _MMIO(0x6B268)
-#define DSCC_PICTURE_PARAMETER_SET_14 _MMIO(0x6BA68)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB 0x782A8
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB 0x783A8
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC 0x784A8
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC 0x785A8
-#define ICL_DSC0_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC)
-
-#define DSCA_PICTURE_PARAMETER_SET_15 _MMIO(0x6B26C)
-#define DSCC_PICTURE_PARAMETER_SET_15 _MMIO(0x6BA6C)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB 0x782AC
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB 0x783AC
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC 0x784AC
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC 0x785AC
-#define ICL_DSC0_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC)
-
-#define DSCA_PICTURE_PARAMETER_SET_16 _MMIO(0x6B270)
-#define DSCC_PICTURE_PARAMETER_SET_16 _MMIO(0x6BA70)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB 0x782B0
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB 0x783B0
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC 0x784B0
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC 0x785B0
-#define ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC)
-#define DSC_SLICE_ROW_PER_FRAME(slice_row_per_frame) ((slice_row_per_frame) << 20)
-#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16)
-#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0)
-
-/* Icelake Rate Control Buffer Threshold Registers */
-#define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230)
-#define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4)
-#define DSCC_RC_BUF_THRESH_0 _MMIO(0x6BA30)
-#define DSCC_RC_BUF_THRESH_0_UDW _MMIO(0x6BA30 + 4)
-#define _ICL_DSC0_RC_BUF_THRESH_0_PB (0x78254)
-#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB (0x78254 + 4)
-#define _ICL_DSC1_RC_BUF_THRESH_0_PB (0x78354)
-#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB (0x78354 + 4)
-#define _ICL_DSC0_RC_BUF_THRESH_0_PC (0x78454)
-#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC (0x78454 + 4)
-#define _ICL_DSC1_RC_BUF_THRESH_0_PC (0x78554)
-#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC (0x78554 + 4)
-#define ICL_DSC0_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_BUF_THRESH_0_PB, \
- _ICL_DSC0_RC_BUF_THRESH_0_PC)
-#define ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB, \
- _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC)
-#define ICL_DSC1_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_BUF_THRESH_0_PB, \
- _ICL_DSC1_RC_BUF_THRESH_0_PC)
-#define ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB, \
- _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC)
-
-#define DSCA_RC_BUF_THRESH_1 _MMIO(0x6B238)
-#define DSCA_RC_BUF_THRESH_1_UDW _MMIO(0x6B238 + 4)
-#define DSCC_RC_BUF_THRESH_1 _MMIO(0x6BA38)
-#define DSCC_RC_BUF_THRESH_1_UDW _MMIO(0x6BA38 + 4)
-#define _ICL_DSC0_RC_BUF_THRESH_1_PB (0x7825C)
-#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB (0x7825C + 4)
-#define _ICL_DSC1_RC_BUF_THRESH_1_PB (0x7835C)
-#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB (0x7835C + 4)
-#define _ICL_DSC0_RC_BUF_THRESH_1_PC (0x7845C)
-#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC (0x7845C + 4)
-#define _ICL_DSC1_RC_BUF_THRESH_1_PC (0x7855C)
-#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC (0x7855C + 4)
-#define ICL_DSC0_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_BUF_THRESH_1_PB, \
- _ICL_DSC0_RC_BUF_THRESH_1_PC)
-#define ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB, \
- _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC)
-#define ICL_DSC1_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_BUF_THRESH_1_PB, \
- _ICL_DSC1_RC_BUF_THRESH_1_PC)
-#define ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
- _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
-
#define PORT_TX_DFLEXDPSP(fia) _MMIO_FIA((fia), 0x008A0)
#define MODULAR_FIA_MASK (1 << 4)
#define TC_LIVE_STATE_TBT(idx) (1 << ((idx) * 8 + 6))
@@ -8105,8 +7650,54 @@ enum skl_power_gate {
#define DSB_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x0)
#define DSB_TAIL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4)
#define DSB_CTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8)
-#define DSB_ENABLE (1 << 31)
-#define DSB_STATUS_BUSY (1 << 0)
+#define DSB_ENABLE REG_BIT(31)
+#define DSB_BUF_REITERATE REG_BIT(29)
+#define DSB_WAIT_FOR_VBLANK REG_BIT(28)
+#define DSB_WAIT_FOR_LINE_IN REG_BIT(27)
+#define DSB_HALT REG_BIT(16)
+#define DSB_NON_POSTED REG_BIT(8)
+#define DSB_STATUS_BUSY REG_BIT(0)
+#define DSB_MMIOCTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0xc)
+#define DSB_MMIO_DEAD_CLOCKS_ENABLE REG_BIT(31)
+#define DSB_MMIO_DEAD_CLOCKS_COUNT_MASK REG_GENMASK(15, 8)
+#define DSB_MMIO_DEAD_CLOCKS_COUNT(x) REG_FIELD_PREP(DSB_MMIO_DEAD_CLOCKS_COUNT_MASK, (x))
+#define DSB_MMIO_CYCLES_MASK REG_GENMASK(7, 0)
+#define DSB_MMIO_CYCLES(x) REG_FIELD_PREP(DSB_MMIO_CYCLES_MASK, (x))
+#define DSB_POLLFUNC(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x10)
+#define DSB_POLL_ENABLE REG_BIT(31)
+#define DSB_POLL_WAIT_MASK REG_GENMASK(30, 23)
+#define DSB_POLL_WAIT(x) REG_FIELD_PREP(DSB_POLL_WAIT_MASK, (x)) /* usec */
+#define DSB_POLL_COUNT_MASK REG_GENMASK(22, 15)
+#define DSB_POLL_COUNT(x) REG_FIELD_PREP(DSB_POLL_COUNT_MASK, (x))
+#define DSB_DEBUG(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x14)
+#define DSB_POLLMASK(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x1c)
+#define DSB_STATUS(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x24)
+#define DSB_INTERRUPT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x28)
+#define DSB_ATS_FAULT_INT_EN REG_BIT(20)
+#define DSB_GTT_FAULT_INT_EN REG_BIT(19)
+#define DSB_RSPTIMEOUT_INT_EN REG_BIT(18)
+#define DSB_POLL_ERR_INT_EN REG_BIT(17)
+#define DSB_PROG_INT_EN REG_BIT(16)
+#define DSB_ATS_FAULT_INT_STATUS REG_BIT(4)
+#define DSB_GTT_FAULT_INT_STATUS REG_BIT(3)
+#define DSB_RSPTIMEOUT_INT_STATUS REG_BIT(2)
+#define DSB_POLL_ERR_INT_STATUS REG_BIT(1)
+#define DSB_PROG_INT_STATUS REG_BIT(0)
+#define DSB_CURRENT_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x2c)
+#define DSB_RM_TIMEOUT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x30)
+#define DSB_RM_CLAIM_TIMEOUT REG_BIT(31)
+#define DSB_RM_READY_TIMEOUT REG_BIT(30)
+#define DSB_RM_CLAIM_TIMEOUT_COUNT_MASK REG_GENMASK(23, 16)
+#define DSB_RM_CLAIM_TIMEOUT_COUNT(x) REG_FIELD_PREP(DSB_RM_CLAIM_TIMEOUT_COUNT_MASK, (x)) /* clocks */
+#define DSB_RM_READY_TIMEOUT_VALUE_MASK REG_GENMASK(15, 0)
+#define DSB_RM_READY_TIMEOUT_VALUE(x) REG_FIELD_PREP(DSB_RM_READY_TIMEOUT_VALUE_MASK, (x)) /* usec */
+#define DSB_RMTIMEOUTREG_CAPTURE(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x34)
+#define DSB_PMCTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x38)
+#define DSB_PMCTRL_2(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x3c)
+#define DSB_PF_LN_LOWER(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x40)
+#define DSB_PF_LN_UPPER(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x44)
+#define DSB_BUFRPT_CNT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x48)
+#define DSB_CHICKEN(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0xf0)
#define CLKREQ_POLICY _MMIO(0x101038)
#define CLKREQ_POLICY_MEM_UP_OVRD REG_BIT(1)
diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h
index be43580a6979..db26de6b57bc 100644
--- a/drivers/gpu/drm/i915/i915_reg_defs.h
+++ b/drivers/gpu/drm/i915/i915_reg_defs.h
@@ -120,6 +120,35 @@
#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))
/*
+ * Like _PICK_EVEN(), but supports 2 ranges of evenly spaced address offsets.
+ * @__c_index corresponds to the index in which the second range starts to be
+ * used. Using math interval notation, the first range is used for indexes [ 0,
+ * @__c_index), while the second range is used for [ @__c_index, ... ). Example:
+ *
+ * #define _FOO_A 0xf000
+ * #define _FOO_B 0xf004
+ * #define _FOO_C 0xf008
+ * #define _SUPER_FOO_A 0xa000
+ * #define _SUPER_FOO_B 0xa100
+ * #define FOO(x) _MMIO(_PICK_EVEN_2RANGES(x, 3, \
+ * _FOO_A, _FOO_B, \
+ * _SUPER_FOO_A, _SUPER_FOO_B))
+ *
+ * This expands to:
+ * 0: 0xf000,
+ * 1: 0xf004,
+ * 2: 0xf008,
+ * 3: 0xa000,
+ * 4: 0xa100,
+ * 5: 0xa200,
+ * ...
+ */
+#define _PICK_EVEN_2RANGES(__index, __c_index, __a, __b, __c, __d) \
+ (BUILD_BUG_ON_ZERO(!__is_constexpr(__c_index)) + \
+ ((__index) < (__c_index) ? _PICK_EVEN(__index, __a, __b) : \
+ _PICK_EVEN((__index) - (__c_index), __c, __d)))
+
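A minimal userspace sketch of the selection logic that _PICK_EVEN_2RANGES() implements, assuming evenly spaced offsets within each range; pick_even_2ranges() and the values in main() are illustrative only, mirroring the FOO() example in the comment above rather than any real i915 register.

#include <assert.h>
#include <stdint.h>

static uint32_t pick_even_2ranges(uint32_t index, uint32_t c_index,
				  uint32_t a, uint32_t b,
				  uint32_t c, uint32_t d)
{
	if (index < c_index)
		return a + index * (b - a);		/* first range */
	return c + (index - c_index) * (d - c);		/* second range */
}

int main(void)
{
	/* mirrors the FOO() expansion listed in the comment above */
	assert(pick_even_2ranges(2, 3, 0xf000, 0xf004, 0xa000, 0xa100) == 0xf008);
	assert(pick_even_2ranges(4, 3, 0xf000, 0xf004, 0xa000, 0xa100) == 0xa100);
	return 0;
}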
+/*
* Given the arbitrary numbers in varargs, pick the 0-based __index'th number.
*
* Always prefer _PICK_EVEN() over this if the numbers are evenly spaced.
@@ -136,6 +165,8 @@ typedef struct {
u32 reg;
} i915_mcr_reg_t;
+#define MCR_REG(offset) ((const i915_mcr_reg_t){ .reg = (offset) })
+
#define INVALID_MMIO_REG _MMIO(0)
/*
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 7503dcb9043b..630a732aaecc 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -48,7 +48,6 @@
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_trace.h"
-#include "intel_pm.h"
struct execute_cb {
struct irq_work work;
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 595e8b574990..e88bb4f04305 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -37,7 +37,6 @@
#include "i915_drv.h"
#include "i915_sysfs.h"
-#include "intel_pm.h"
struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
{
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index a72698a2dbc8..a1bc804cfa15 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -139,13 +139,6 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
mutex_unlock(&bman->lock);
- if (place->lpfn - place->fpfn == n_pages)
- bman_res->base.start = place->fpfn;
- else if (lpfn <= bman->visible_size)
- bman_res->base.start = 0;
- else
- bman_res->base.start = bman->visible_size;
-
*res = &bman_res->base;
return 0;
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 98769e5f2c3d..fc5cd14adfcc 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -119,9 +119,14 @@ void intel_device_info_print(const struct intel_device_info *info,
drm_printf(p, "display version: %u\n",
runtime->display.ip.ver);
+ drm_printf(p, "graphics stepping: %s\n", intel_step_name(runtime->step.graphics_step));
+ drm_printf(p, "media stepping: %s\n", intel_step_name(runtime->step.media_step));
+ drm_printf(p, "display stepping: %s\n", intel_step_name(runtime->step.display_step));
+ drm_printf(p, "base die stepping: %s\n", intel_step_name(runtime->step.basedie_step));
+
drm_printf(p, "gt: %d\n", info->gt);
- drm_printf(p, "memory-regions: %x\n", runtime->memory_regions);
- drm_printf(p, "page-sizes: %x\n", runtime->page_sizes);
+ drm_printf(p, "memory-regions: 0x%x\n", runtime->memory_regions);
+ drm_printf(p, "page-sizes: 0x%x\n", runtime->page_sizes);
drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
drm_printf(p, "ppgtt-size: %d\n", runtime->ppgtt_size);
drm_printf(p, "ppgtt-type: %d\n", runtime->ppgtt_type);
@@ -202,6 +207,10 @@ static const u16 subplatform_rpl_ids[] = {
INTEL_RPLP_IDS(0),
};
+static const u16 subplatform_rplu_ids[] = {
+ INTEL_RPLU_IDS(0),
+};
+
static const u16 subplatform_g10_ids[] = {
INTEL_DG2_G10_IDS(0),
INTEL_ATS_M150_IDS(0),
@@ -269,6 +278,9 @@ static void intel_device_info_subplatform_init(struct drm_i915_private *i915)
} else if (find_devid(devid, subplatform_rpl_ids,
ARRAY_SIZE(subplatform_rpl_ids))) {
mask = BIT(INTEL_SUBPLATFORM_RPL);
+ if (find_devid(devid, subplatform_rplu_ids,
+ ARRAY_SIZE(subplatform_rplu_ids)))
+ mask |= BIT(INTEL_SUBPLATFORM_RPLU);
} else if (find_devid(devid, subplatform_g10_ids,
ARRAY_SIZE(subplatform_g10_ids))) {
mask = BIT(INTEL_SUBPLATFORM_G10);
@@ -436,6 +448,14 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
runtime->num_sprites[pipe] = 1;
}
+ if (HAS_DISPLAY(dev_priv) &&
+ (IS_DGFX(dev_priv) || DISPLAY_VER(dev_priv) >= 14) &&
+ !(intel_de_read(dev_priv, GU_CNTL_PROTECTED) & DEPRESENT)) {
+ drm_info(&dev_priv->drm, "Display not present, disabling\n");
+
+ runtime->pipe_mask = 0;
+ }
+
if (HAS_DISPLAY(dev_priv) && IS_GRAPHICS_VER(dev_priv, 7, 8) &&
HAS_PCH_SPLIT(dev_priv)) {
u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
@@ -457,8 +477,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
drm_info(&dev_priv->drm,
"Display fused off, disabling\n");
runtime->pipe_mask = 0;
- runtime->cpu_transcoder_mask = 0;
- runtime->fbc_mask = 0;
} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
drm_info(&dev_priv->drm, "PipeC fused off\n");
runtime->pipe_mask &= ~BIT(PIPE_C);
@@ -535,5 +553,5 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
{
drm_printf(p, "Has logical contexts? %s\n",
str_yes_no(caps->has_logical_contexts));
- drm_printf(p, "scheduler: %x\n", caps->scheduler);
+ drm_printf(p, "scheduler: 0x%x\n", caps->scheduler);
}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 80bda653d61b..b30cc8b97c3a 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -127,6 +127,7 @@ enum intel_platform {
* bit set
*/
#define INTEL_SUBPLATFORM_N 1
+#define INTEL_SUBPLATFORM_RPLU 2
/* MTL */
#define INTEL_SUBPLATFORM_M 0
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 1f4805aa2b08..091743e32e17 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -8,6 +8,7 @@
#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
#include "display/intel_dpio_phy.h"
+#include "display/intel_lvds_regs.h"
#include "display/vlv_dsi_pll_regs.h"
#include "gt/intel_gt_regs.h"
#include "gvt/gvt.h"
@@ -117,10 +118,10 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(PIPEDSL(PIPE_B));
MMIO_D(PIPEDSL(PIPE_C));
MMIO_D(PIPEDSL(_PIPE_EDP));
- MMIO_D(PIPECONF(PIPE_A));
- MMIO_D(PIPECONF(PIPE_B));
- MMIO_D(PIPECONF(PIPE_C));
- MMIO_D(PIPECONF(_PIPE_EDP));
+ MMIO_D(TRANSCONF(TRANSCODER_A));
+ MMIO_D(TRANSCONF(TRANSCODER_B));
+ MMIO_D(TRANSCONF(TRANSCODER_C));
+ MMIO_D(TRANSCONF(TRANSCODER_EDP));
MMIO_D(PIPESTAT(PIPE_A));
MMIO_D(PIPESTAT(PIPE_B));
MMIO_D(PIPESTAT(PIPE_C));
@@ -218,41 +219,41 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(SPRSCALE(PIPE_C));
MMIO_D(SPRSURFLIVE(PIPE_C));
MMIO_D(REG_50080(PIPE_C, PLANE_SPRITE0));
- MMIO_D(HTOTAL(TRANSCODER_A));
- MMIO_D(HBLANK(TRANSCODER_A));
- MMIO_D(HSYNC(TRANSCODER_A));
- MMIO_D(VTOTAL(TRANSCODER_A));
- MMIO_D(VBLANK(TRANSCODER_A));
- MMIO_D(VSYNC(TRANSCODER_A));
+ MMIO_D(TRANS_HTOTAL(TRANSCODER_A));
+ MMIO_D(TRANS_HBLANK(TRANSCODER_A));
+ MMIO_D(TRANS_HSYNC(TRANSCODER_A));
+ MMIO_D(TRANS_VTOTAL(TRANSCODER_A));
+ MMIO_D(TRANS_VBLANK(TRANSCODER_A));
+ MMIO_D(TRANS_VSYNC(TRANSCODER_A));
MMIO_D(BCLRPAT(TRANSCODER_A));
- MMIO_D(VSYNCSHIFT(TRANSCODER_A));
+ MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_A));
MMIO_D(PIPESRC(TRANSCODER_A));
- MMIO_D(HTOTAL(TRANSCODER_B));
- MMIO_D(HBLANK(TRANSCODER_B));
- MMIO_D(HSYNC(TRANSCODER_B));
- MMIO_D(VTOTAL(TRANSCODER_B));
- MMIO_D(VBLANK(TRANSCODER_B));
- MMIO_D(VSYNC(TRANSCODER_B));
+ MMIO_D(TRANS_HTOTAL(TRANSCODER_B));
+ MMIO_D(TRANS_HBLANK(TRANSCODER_B));
+ MMIO_D(TRANS_HSYNC(TRANSCODER_B));
+ MMIO_D(TRANS_VTOTAL(TRANSCODER_B));
+ MMIO_D(TRANS_VBLANK(TRANSCODER_B));
+ MMIO_D(TRANS_VSYNC(TRANSCODER_B));
MMIO_D(BCLRPAT(TRANSCODER_B));
- MMIO_D(VSYNCSHIFT(TRANSCODER_B));
+ MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_B));
MMIO_D(PIPESRC(TRANSCODER_B));
- MMIO_D(HTOTAL(TRANSCODER_C));
- MMIO_D(HBLANK(TRANSCODER_C));
- MMIO_D(HSYNC(TRANSCODER_C));
- MMIO_D(VTOTAL(TRANSCODER_C));
- MMIO_D(VBLANK(TRANSCODER_C));
- MMIO_D(VSYNC(TRANSCODER_C));
+ MMIO_D(TRANS_HTOTAL(TRANSCODER_C));
+ MMIO_D(TRANS_HBLANK(TRANSCODER_C));
+ MMIO_D(TRANS_HSYNC(TRANSCODER_C));
+ MMIO_D(TRANS_VTOTAL(TRANSCODER_C));
+ MMIO_D(TRANS_VBLANK(TRANSCODER_C));
+ MMIO_D(TRANS_VSYNC(TRANSCODER_C));
MMIO_D(BCLRPAT(TRANSCODER_C));
- MMIO_D(VSYNCSHIFT(TRANSCODER_C));
+ MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_C));
MMIO_D(PIPESRC(TRANSCODER_C));
- MMIO_D(HTOTAL(TRANSCODER_EDP));
- MMIO_D(HBLANK(TRANSCODER_EDP));
- MMIO_D(HSYNC(TRANSCODER_EDP));
- MMIO_D(VTOTAL(TRANSCODER_EDP));
- MMIO_D(VBLANK(TRANSCODER_EDP));
- MMIO_D(VSYNC(TRANSCODER_EDP));
+ MMIO_D(TRANS_HTOTAL(TRANSCODER_EDP));
+ MMIO_D(TRANS_HBLANK(TRANSCODER_EDP));
+ MMIO_D(TRANS_HSYNC(TRANSCODER_EDP));
+ MMIO_D(TRANS_VTOTAL(TRANSCODER_EDP));
+ MMIO_D(TRANS_VBLANK(TRANSCODER_EDP));
+ MMIO_D(TRANS_VSYNC(TRANSCODER_EDP));
MMIO_D(BCLRPAT(TRANSCODER_EDP));
- MMIO_D(VSYNCSHIFT(TRANSCODER_EDP));
+ MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_EDP));
MMIO_D(PIPE_DATA_M1(TRANSCODER_A));
MMIO_D(PIPE_DATA_N1(TRANSCODER_A));
MMIO_D(PIPE_DATA_M2(TRANSCODER_A));
@@ -493,9 +494,9 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(GAMMA_MODE(PIPE_A));
MMIO_D(GAMMA_MODE(PIPE_B));
MMIO_D(GAMMA_MODE(PIPE_C));
- MMIO_D(PIPE_MULT(PIPE_A));
- MMIO_D(PIPE_MULT(PIPE_B));
- MMIO_D(PIPE_MULT(PIPE_C));
+ MMIO_D(TRANS_MULT(TRANSCODER_A));
+ MMIO_D(TRANS_MULT(TRANSCODER_B));
+ MMIO_D(TRANS_MULT(TRANSCODER_C));
MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A));
MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B));
MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C));
@@ -788,9 +789,9 @@ static int iterate_bdw_plus_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_RING_D(RING_REG);
#undef RING_REG
- MMIO_D(PIPEMISC(PIPE_A));
- MMIO_D(PIPEMISC(PIPE_B));
- MMIO_D(PIPEMISC(PIPE_C));
+ MMIO_D(PIPE_MISC(PIPE_A));
+ MMIO_D(PIPE_MISC(PIPE_B));
+ MMIO_D(PIPE_MISC(PIPE_C));
MMIO_D(_MMIO(0x1c1d0));
MMIO_D(GEN6_MBCUNIT_SNPCR);
MMIO_D(GEN7_MISCCPCTL);
diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c
index a234d9b4ed14..3db2ba439bb5 100644
--- a/drivers/gpu/drm/i915/intel_pcode.c
+++ b/drivers/gpu/drm/i915/intel_pcode.c
@@ -204,15 +204,42 @@ out:
#undef COND
}
+static int pcode_init_wait(struct intel_uncore *uncore, int timeout_ms)
+{
+ if (__intel_wait_for_register_fw(uncore,
+ GEN6_PCODE_MAILBOX,
+ GEN6_PCODE_READY, 0,
+ 500, timeout_ms,
+ NULL))
+ return -EPROBE_DEFER;
+
+ return skl_pcode_request(uncore,
+ DG1_PCODE_STATUS,
+ DG1_UNCORE_GET_INIT_STATUS,
+ DG1_UNCORE_INIT_STATUS_COMPLETE,
+ DG1_UNCORE_INIT_STATUS_COMPLETE, timeout_ms);
+}
+
int intel_pcode_init(struct intel_uncore *uncore)
{
+ int err;
+
if (!IS_DGFX(uncore->i915))
return 0;
- return skl_pcode_request(uncore, DG1_PCODE_STATUS,
- DG1_UNCORE_GET_INIT_STATUS,
- DG1_UNCORE_INIT_STATUS_COMPLETE,
- DG1_UNCORE_INIT_STATUS_COMPLETE, 180000);
+ /*
+ * Wait 10 seconds for the punit to settle and complete any
+ * outstanding transactions upon module load.
+ */
+ err = pcode_init_wait(uncore, 10000);
+
+ if (err) {
+ drm_notice(&uncore->i915->drm,
+ "Waiting for HW initialisation...\n");
+ err = pcode_init_wait(uncore, 180000);
+ }
+
+ return err;
}
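A small standalone sketch of the escalating-timeout pattern intel_pcode_init() now uses: poll with a short timeout first, warn, then retry with the long timeout. poll_ready() below is a stand-in for the real register poll, not an i915 interface.

#include <stdbool.h>
#include <stdio.h>

static bool poll_ready(int timeout_ms)
{
	/* stand-in for the hardware poll; pretend only the long wait succeeds */
	return timeout_ms >= 180000;
}

static int init_with_fallback(void)
{
	if (poll_ready(10000))			/* fast path: 10 s */
		return 0;

	fprintf(stderr, "Waiting for HW initialisation...\n");
	return poll_ready(180000) ? 0 : -1;	/* slow path: 180 s */
}

int main(void)
{
	return init_with_fallback();
}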
int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 59714b1080d4..c45af0d981fd 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -44,13 +44,6 @@ struct drm_i915_clock_gating_funcs {
void (*init_clock_gating)(struct drm_i915_private *i915);
};
-/* used in computing the new watermarks state */
-struct intel_wm_config {
- unsigned int num_pipes_active;
- bool sprites_enabled;
- bool sprites_scaled;
-};
-
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (HAS_LLC(dev_priv)) {
@@ -131,3961 +124,6 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
PWM1_GATING_DIS | PWM2_GATING_DIS);
}
-static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
-{
- u32 tmp;
-
- tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG);
-
- switch (tmp & CLKCFG_FSB_MASK) {
- case CLKCFG_FSB_533:
- dev_priv->fsb_freq = 533; /* 133*4 */
- break;
- case CLKCFG_FSB_800:
- dev_priv->fsb_freq = 800; /* 200*4 */
- break;
- case CLKCFG_FSB_667:
- dev_priv->fsb_freq = 667; /* 167*4 */
- break;
- case CLKCFG_FSB_400:
- dev_priv->fsb_freq = 400; /* 100*4 */
- break;
- }
-
- switch (tmp & CLKCFG_MEM_MASK) {
- case CLKCFG_MEM_533:
- dev_priv->mem_freq = 533;
- break;
- case CLKCFG_MEM_667:
- dev_priv->mem_freq = 667;
- break;
- case CLKCFG_MEM_800:
- dev_priv->mem_freq = 800;
- break;
- }
-
- /* detect pineview DDR3 setting */
- tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL);
- dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
-}
-
-static void ilk_get_mem_freq(struct drm_i915_private *dev_priv)
-{
- u16 ddrpll, csipll;
-
- ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
- csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);
-
- switch (ddrpll & 0xff) {
- case 0xc:
- dev_priv->mem_freq = 800;
- break;
- case 0x10:
- dev_priv->mem_freq = 1066;
- break;
- case 0x14:
- dev_priv->mem_freq = 1333;
- break;
- case 0x18:
- dev_priv->mem_freq = 1600;
- break;
- default:
- drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
- ddrpll & 0xff);
- dev_priv->mem_freq = 0;
- break;
- }
-
- switch (csipll & 0x3ff) {
- case 0x00c:
- dev_priv->fsb_freq = 3200;
- break;
- case 0x00e:
- dev_priv->fsb_freq = 3733;
- break;
- case 0x010:
- dev_priv->fsb_freq = 4266;
- break;
- case 0x012:
- dev_priv->fsb_freq = 4800;
- break;
- case 0x014:
- dev_priv->fsb_freq = 5333;
- break;
- case 0x016:
- dev_priv->fsb_freq = 5866;
- break;
- case 0x018:
- dev_priv->fsb_freq = 6400;
- break;
- default:
- drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
- csipll & 0x3ff);
- dev_priv->fsb_freq = 0;
- break;
- }
-}
-
-static const struct cxsr_latency cxsr_latency_table[] = {
- {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
- {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
- {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
- {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
- {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
-
- {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
- {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
- {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
- {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
- {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
-
- {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
- {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
- {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
- {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
- {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
-
- {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
- {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
- {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
- {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
- {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
-
- {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
- {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
- {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
- {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
- {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
-
- {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
- {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
- {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
- {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
- {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
-};
-
-static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
- bool is_ddr3,
- int fsb,
- int mem)
-{
- const struct cxsr_latency *latency;
- int i;
-
- if (fsb == 0 || mem == 0)
- return NULL;
-
- for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
- latency = &cxsr_latency_table[i];
- if (is_desktop == latency->is_desktop &&
- is_ddr3 == latency->is_ddr3 &&
- fsb == latency->fsb_freq && mem == latency->mem_freq)
- return latency;
- }
-
- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
-
- return NULL;
-}
-
-static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
-{
- u32 val;
-
- vlv_punit_get(dev_priv);
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
- if (enable)
- val &= ~FORCE_DDR_HIGH_FREQ;
- else
- val |= FORCE_DDR_HIGH_FREQ;
- val &= ~FORCE_DDR_LOW_FREQ;
- val |= FORCE_DDR_FREQ_REQ_ACK;
- vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
-
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
- FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
- drm_err(&dev_priv->drm,
- "timed out waiting for Punit DDR DVFS request\n");
-
- vlv_punit_put(dev_priv);
-}
-
-static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
-{
- u32 val;
-
- vlv_punit_get(dev_priv);
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
- if (enable)
- val |= DSP_MAXFIFO_PM5_ENABLE;
- else
- val &= ~DSP_MAXFIFO_PM5_ENABLE;
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
-
- vlv_punit_put(dev_priv);
-}
-
-#define FW_WM(value, plane) \
- (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
-
-static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
-{
- bool was_enabled;
- u32 val;
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
- } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
- } else if (IS_PINEVIEW(dev_priv)) {
- val = intel_uncore_read(&dev_priv->uncore, DSPFW3);
- was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
- if (enable)
- val |= PINEVIEW_SELF_REFRESH_EN;
- else
- val &= ~PINEVIEW_SELF_REFRESH_EN;
- intel_uncore_write(&dev_priv->uncore, DSPFW3, val);
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW3);
- } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
- val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
- _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
- } else if (IS_I915GM(dev_priv)) {
- /*
- * FIXME can't find a bit like this for 915G, and
- * yet it does have the related watermark in
- * FW_BLC_SELF. What's going on?
- */
- was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
- val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
- _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
- intel_uncore_write(&dev_priv->uncore, INSTPM, val);
- intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
- } else {
- return false;
- }
-
- trace_intel_memory_cxsr(dev_priv, was_enabled, enable);
-
- drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
- str_enabled_disabled(enable),
- str_enabled_disabled(was_enabled));
-
- return was_enabled;
-}
-
-/**
- * intel_set_memory_cxsr - Configure CxSR state
- * @dev_priv: i915 device
- * @enable: Allow vs. disallow CxSR
- *
- * Allow or disallow the system to enter a special CxSR
- * (C-state self refresh) state. What typically happens in CxSR mode
- * is that several display FIFOs may get combined into a single larger
- * FIFO for a particular plane (so called max FIFO mode) to allow the
- * system to defer memory fetches longer, and the memory will enter
- * self refresh.
- *
- * Note that enabling CxSR does not guarantee that the system enter
- * this special mode, nor does it guarantee that the system stays
- * in that mode once entered. So this just allows/disallows the system
- * to autonomously utilize the CxSR mode. Other factors such as core
- * C-states will affect when/if the system actually enters/exits the
- * CxSR mode.
- *
- * Note that on VLV/CHV this actually only controls the max FIFO mode,
- * and the system is free to enter/exit memory self refresh at any time
- * even when the use of CxSR has been disallowed.
- *
- * While the system is actually in the CxSR/max FIFO mode, some plane
- * control registers will not get latched on vblank. Thus in order to
- * guarantee the system will respond to changes in the plane registers
- * we must always disallow CxSR prior to making changes to those registers.
- * Unfortunately the system will re-evaluate the CxSR conditions at
- * frame start which happens after vblank start (which is when the plane
- * registers would get latched), so we can't proceed with the plane update
- * during the same frame where we disallowed CxSR.
- *
- * Certain platforms also have a deeper HPLL SR mode. Fortunately the
- * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
- * the hardware w.r.t. HPLL SR when writing to plane registers.
- * Disallowing just CxSR is sufficient.
- */
-bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
-{
- bool ret;
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- ret = _intel_set_memory_cxsr(dev_priv, enable);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->display.wm.vlv.cxsr = enable;
- else if (IS_G4X(dev_priv))
- dev_priv->display.wm.g4x.cxsr = enable;
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-
- return ret;
-}
-
-/*
- * Latency for FIFO fetches is dependent on several factors:
- * - memory configuration (speed, channels)
- * - chipset
- * - current MCH state
- * It can be fairly high in some situations, so here we assume a fairly
- * pessimal value. It's a tradeoff between extra memory fetches (if we
- * set this value too high, the FIFO will fetch frequently to stay full)
- * and power consumption (set it too low to save power and we might see
- * FIFO underruns and display "flicker").
- *
- * A value of 5us seems to be a good balance; safe for very low end
- * platforms but not overly aggressive on lower latency configs.
- */
-static const int pessimal_latency_ns = 5000;
-
-#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
- ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
-
-static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
- enum pipe pipe = crtc->pipe;
- int sprite0_start, sprite1_start;
- u32 dsparb, dsparb2, dsparb3;
-
- switch (pipe) {
- case PIPE_A:
- dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
- sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
- sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
- break;
- case PIPE_B:
- dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
- sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
- sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
- break;
- case PIPE_C:
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
- dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
- sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
- sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
- break;
- default:
- MISSING_CASE(pipe);
- return;
- }
-
- fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
- fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
- fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
- fifo_state->plane[PLANE_CURSOR] = 63;
-}
-
-static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
- enum i9xx_plane_id i9xx_plane)
-{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
- int size;
-
- size = dsparb & 0x7f;
- if (i9xx_plane == PLANE_B)
- size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
-
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
- dsparb, plane_name(i9xx_plane), size);
-
- return size;
-}
-
-static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
- enum i9xx_plane_id i9xx_plane)
-{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
- int size;
-
- size = dsparb & 0x1ff;
- if (i9xx_plane == PLANE_B)
- size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
- size >>= 1; /* Convert to cachelines */
-
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
- dsparb, plane_name(i9xx_plane), size);
-
- return size;
-}
-
-static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
- enum i9xx_plane_id i9xx_plane)
-{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
- int size;
-
- size = dsparb & 0x7f;
- size >>= 2; /* Convert to cachelines */
-
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
- dsparb, plane_name(i9xx_plane), size);
-
- return size;
-}
-
-/* Pineview has different values for various configs */
-static const struct intel_watermark_params pnv_display_wm = {
- .fifo_size = PINEVIEW_DISPLAY_FIFO,
- .max_wm = PINEVIEW_MAX_WM,
- .default_wm = PINEVIEW_DFT_WM,
- .guard_size = PINEVIEW_GUARD_WM,
- .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params pnv_display_hplloff_wm = {
- .fifo_size = PINEVIEW_DISPLAY_FIFO,
- .max_wm = PINEVIEW_MAX_WM,
- .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
- .guard_size = PINEVIEW_GUARD_WM,
- .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params pnv_cursor_wm = {
- .fifo_size = PINEVIEW_CURSOR_FIFO,
- .max_wm = PINEVIEW_CURSOR_MAX_WM,
- .default_wm = PINEVIEW_CURSOR_DFT_WM,
- .guard_size = PINEVIEW_CURSOR_GUARD_WM,
- .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
- .fifo_size = PINEVIEW_CURSOR_FIFO,
- .max_wm = PINEVIEW_CURSOR_MAX_WM,
- .default_wm = PINEVIEW_CURSOR_DFT_WM,
- .guard_size = PINEVIEW_CURSOR_GUARD_WM,
- .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i965_cursor_wm_info = {
- .fifo_size = I965_CURSOR_FIFO,
- .max_wm = I965_CURSOR_MAX_WM,
- .default_wm = I965_CURSOR_DFT_WM,
- .guard_size = 2,
- .cacheline_size = I915_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i945_wm_info = {
- .fifo_size = I945_FIFO_SIZE,
- .max_wm = I915_MAX_WM,
- .default_wm = 1,
- .guard_size = 2,
- .cacheline_size = I915_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i915_wm_info = {
- .fifo_size = I915_FIFO_SIZE,
- .max_wm = I915_MAX_WM,
- .default_wm = 1,
- .guard_size = 2,
- .cacheline_size = I915_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i830_a_wm_info = {
- .fifo_size = I855GM_FIFO_SIZE,
- .max_wm = I915_MAX_WM,
- .default_wm = 1,
- .guard_size = 2,
- .cacheline_size = I830_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i830_bc_wm_info = {
- .fifo_size = I855GM_FIFO_SIZE,
- .max_wm = I915_MAX_WM/2,
- .default_wm = 1,
- .guard_size = 2,
- .cacheline_size = I830_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i845_wm_info = {
- .fifo_size = I830_FIFO_SIZE,
- .max_wm = I915_MAX_WM,
- .default_wm = 1,
- .guard_size = 2,
- .cacheline_size = I830_FIFO_LINE_SIZE,
-};
-
-/**
- * intel_wm_method1 - Method 1 / "small buffer" watermark formula
- * @pixel_rate: Pipe pixel rate in kHz
- * @cpp: Plane bytes per pixel
- * @latency: Memory wakeup latency in 0.1us units
- *
- * Compute the watermark using the method 1 or "small buffer"
- * formula. The caller may additionally add extra cachelines
- * to account for TLB misses and clock crossings.
- *
- * This method is concerned with the short term drain rate
- * of the FIFO, ie. it does not account for blanking periods
- * which would effectively reduce the average drain rate across
- * a longer period. The name "small" refers to the fact the
- * FIFO is relatively small compared to the amount of data
- * fetched.
- *
- * The FIFO level vs. time graph might look something like:
- *
- * |\ |\
- * | \ | \
- * __---__---__ (- plane active, _ blanking)
- * -> time
- *
- * or perhaps like this:
- *
- * |\|\ |\|\
- * __----__----__ (- plane active, _ blanking)
- * -> time
- *
- * Returns:
- * The watermark in bytes
- */
-static unsigned int intel_wm_method1(unsigned int pixel_rate,
- unsigned int cpp,
- unsigned int latency)
-{
- u64 ret;
-
- ret = mul_u32_u32(pixel_rate, cpp * latency);
- ret = DIV_ROUND_UP_ULL(ret, 10000);
-
- return ret;
-}
-
-/**
- * intel_wm_method2 - Method 2 / "large buffer" watermark formula
- * @pixel_rate: Pipe pixel rate in kHz
- * @htotal: Pipe horizontal total
- * @width: Plane width in pixels
- * @cpp: Plane bytes per pixel
- * @latency: Memory wakeup latency in 0.1us units
- *
- * Compute the watermark using the method 2 or "large buffer"
- * formula. The caller may additionally add extra cachelines
- * to account for TLB misses and clock crossings.
- *
- * This method is concerned with the long term drain rate
- * of the FIFO, ie. it does account for blanking periods
- * which effectively reduce the average drain rate across
- * a longer period. The name "large" refers to the fact the
- * FIFO is relatively large compared to the amount of data
- * fetched.
- *
- * The FIFO level vs. time graph might look something like:
- *
- * |\___ |\___
- * | \___ | \___
- * | \ | \
- * __ --__--__--__--__--__--__ (- plane active, _ blanking)
- * -> time
- *
- * Returns:
- * The watermark in bytes
- */
-static unsigned int intel_wm_method2(unsigned int pixel_rate,
- unsigned int htotal,
- unsigned int width,
- unsigned int cpp,
- unsigned int latency)
-{
- unsigned int ret;
-
- /*
- * FIXME remove once all users are computing
- * watermarks in the correct place.
- */
- if (WARN_ON_ONCE(htotal == 0))
- htotal = 1;
-
- ret = (latency * pixel_rate) / (htotal * 10000);
- ret = (ret + 1) * width * cpp;
-
- return ret;
-}
-
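For reference, a worked example of the two formulas documented above, using illustrative numbers rather than figures from this patch: with pixel_rate = 148500 kHz, cpp = 4 and latency = 50 (5 usec), method 1 gives DIV_ROUND_UP(148500 * 4 * 50, 10000) = 2970 bytes; adding htotal = 2200 and width = 1920 pixels, method 2 gives (50 * 148500 / (2200 * 10000) + 1) * 1920 * 4 = (0 + 1) * 7680 = 7680 bytes, i.e. the "large buffer" result is quantized to whole lines of plane data.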
-/**
- * intel_calculate_wm - calculate watermark level
- * @pixel_rate: pixel clock
- * @wm: chip FIFO params
- * @fifo_size: size of the FIFO buffer
- * @cpp: bytes per pixel
- * @latency_ns: memory latency for the platform
- *
- * Calculate the watermark level (the level at which the display plane will
- * start fetching from memory again). Each chip has a different display
- * FIFO size and allocation, so the caller needs to figure that out and pass
- * in the correct intel_watermark_params structure.
- *
- * As the pixel clock runs, the FIFO will be drained at a rate that depends
- * on the pixel size. When it reaches the watermark level, it'll start
- * fetching FIFO-line-sized chunks from memory until the FIFO fills
- * past the watermark point. If the FIFO drains completely, a FIFO underrun
- * will occur, and a display engine hang could result.
- */
-static unsigned int intel_calculate_wm(int pixel_rate,
- const struct intel_watermark_params *wm,
- int fifo_size, int cpp,
- unsigned int latency_ns)
-{
- int entries, wm_size;
-
- /*
- * Note: we need to make sure we don't overflow for various clock &
- * latency values.
- * clocks go from a few thousand to several hundred thousand.
- * latency is usually a few thousand
- */
- entries = intel_wm_method1(pixel_rate, cpp,
- latency_ns / 100);
- entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
- wm->guard_size;
- DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);
-
- wm_size = fifo_size - entries;
- DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
-
- /* Don't promote wm_size to unsigned... */
- if (wm_size > wm->max_wm)
- wm_size = wm->max_wm;
- if (wm_size <= 0)
- wm_size = wm->default_wm;
-
- /*
- * Bspec seems to indicate that the value shouldn't be lower than
- * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
- * Let's go for 8, which is the burst size, since certain platforms
- * already use a hardcoded 8 (which is what the spec says should be
- * done).
- */
- if (wm_size <= 8)
- wm_size = 8;
-
- return wm_size;
-}
-
-static bool is_disabling(int old, int new, int threshold)
-{
- return old >= threshold && new < threshold;
-}
-
-static bool is_enabling(int old, int new, int threshold)
-{
- return old < threshold && new >= threshold;
-}
-
-static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
-{
- return dev_priv->display.wm.max_level + 1;
-}
-
-bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
-
- /* FIXME check the 'enable' instead */
- if (!crtc_state->hw.active)
- return false;
-
- /*
- * Treat cursor with fb as always visible since cursor updates
- * can happen faster than the vrefresh rate, and the current
- * watermark code doesn't handle that correctly. Cursor updates
- * which set/clear the fb or change the cursor size are going
- * to get throttled by intel_legacy_cursor_update() to work
- * around this problem with the watermark code.
- */
- if (plane->id == PLANE_CURSOR)
- return plane_state->hw.fb != NULL;
- else
- return plane_state->uapi.visible;
-}
-
-static bool intel_crtc_active(struct intel_crtc *crtc)
-{
- /* Be paranoid as we can arrive here with only partial
- * state retrieved from the hardware during setup.
- *
- * We can ditch the adjusted_mode.crtc_clock check as soon
- * as Haswell has gained clock readout/fastboot support.
- *
- * We can ditch the crtc->primary->state->fb check as soon as we can
- * properly reconstruct framebuffers.
- *
- * FIXME: The intel_crtc->active here should be switched to
- * crtc->state->active once we have proper CRTC states wired up
- * for atomic.
- */
- return crtc && crtc->active && crtc->base.primary->state->fb &&
- crtc->config->hw.adjusted_mode.crtc_clock;
-}
-
-static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
-{
- struct intel_crtc *crtc, *enabled = NULL;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- if (intel_crtc_active(crtc)) {
- if (enabled)
- return NULL;
- enabled = crtc;
- }
- }
-
- return enabled;
-}
-
-static void pnv_update_wm(struct drm_i915_private *dev_priv)
-{
- struct intel_crtc *crtc;
- const struct cxsr_latency *latency;
- u32 reg;
- unsigned int wm;
-
- latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
- dev_priv->is_ddr3,
- dev_priv->fsb_freq,
- dev_priv->mem_freq);
- if (!latency) {
- drm_dbg_kms(&dev_priv->drm,
- "Unknown FSB/MEM found, disable CxSR\n");
- intel_set_memory_cxsr(dev_priv, false);
- return;
- }
-
- crtc = single_enabled_crtc(dev_priv);
- if (crtc) {
- const struct drm_framebuffer *fb =
- crtc->base.primary->state->fb;
- int pixel_rate = crtc->config->pixel_rate;
- int cpp = fb->format->cpp[0];
-
- /* Display SR */
- wm = intel_calculate_wm(pixel_rate, &pnv_display_wm,
- pnv_display_wm.fifo_size,
- cpp, latency->display_sr);
- reg = intel_uncore_read(&dev_priv->uncore, DSPFW1);
- reg &= ~DSPFW_SR_MASK;
- reg |= FW_WM(wm, SR);
- intel_uncore_write(&dev_priv->uncore, DSPFW1, reg);
- drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
-
- /* cursor SR */
- wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm,
- pnv_display_wm.fifo_size,
- 4, latency->cursor_sr);
- intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_CURSOR_SR_MASK,
- FW_WM(wm, CURSOR_SR));
-
- /* Display HPLL off SR */
- wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm,
- pnv_display_hplloff_wm.fifo_size,
- cpp, latency->display_hpll_disable);
- intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
-
- /* cursor HPLL off SR */
- wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm,
- pnv_display_hplloff_wm.fifo_size,
- 4, latency->cursor_hpll_disable);
- reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
- reg &= ~DSPFW_HPLL_CURSOR_MASK;
- reg |= FW_WM(wm, HPLL_CURSOR);
- intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
- drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
-
- intel_set_memory_cxsr(dev_priv, true);
- } else {
- intel_set_memory_cxsr(dev_priv, false);
- }
-}
-
-/*
- * Documentation says:
- * "If the line size is small, the TLB fetches can get in the way of the
- * data fetches, causing some lag in the pixel data return which is not
- * accounted for in the above formulas. The following adjustment only
- * needs to be applied if eight whole lines fit in the buffer at once.
- * The WM is adjusted upwards by the difference between the FIFO size
- * and the size of 8 whole lines. This adjustment is always performed
- * in the actual pixel depth regardless of whether FBC is enabled or not."
- */
-static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
-{
- int tlb_miss = fifo_size * 64 - width * cpp * 8;
-
- return max(0, tlb_miss);
-}
-
-static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
- const struct g4x_wm_values *wm)
-{
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe)
- trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
-
- intel_uncore_write(&dev_priv->uncore, DSPFW1,
- FW_WM(wm->sr.plane, SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2,
- (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
- FW_WM(wm->sr.fbc, FBC_SR) |
- FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW3,
- (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
- FW_WM(wm->sr.cursor, CURSOR_SR) |
- FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
- FW_WM(wm->hpll.plane, HPLL_SR));
-
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
-}
-
-#define FW_WM_VLV(value, plane) \
- (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
-
-static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
- const struct vlv_wm_values *wm)
-{
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe) {
- trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
-
- intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
- (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
- (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
- (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
- (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
- }
-
- /*
- * Zero the (unused) WM1 watermarks, and also clear all the
- * high order bits so that there are no out of bounds values
- * present in the registers during the reprogramming.
- */
- intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
- intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
-
- intel_uncore_write(&dev_priv->uncore, DSPFW1,
- FW_WM(wm->sr.plane, SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2,
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW3,
- FW_WM(wm->sr.cursor, CURSOR_SR));
-
- if (IS_CHERRYVIEW(dev_priv)) {
- intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
- intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
- intel_uncore_write(&dev_priv->uncore, DSPHOWM,
- FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
- } else {
- intel_uncore_write(&dev_priv->uncore, DSPFW7,
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- intel_uncore_write(&dev_priv->uncore, DSPHOWM,
- FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
- }
-
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
-}
-
-#undef FW_WM_VLV
-
-static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
-{
- /* all latencies in usec */
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
-
- dev_priv->display.wm.max_level = G4X_WM_LEVEL_HPLL;
-}
-
-static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
-{
- /*
- * DSPCNTR[13] supposedly controls whether the
- * primary plane can use the FIFO space otherwise
- * reserved for the sprite plane. It's not 100% clear
- * what the actual FIFO size is, but it looks like we
- * can happily set both primary and sprite watermarks
- * up to 127 cachelines. So that would seem to mean
- * that either DSPCNTR[13] doesn't do anything, or that
- * the total FIFO is >= 256 cachelines in size. Either
- * way, we don't seem to have to worry about this
- * repartitioning as the maximum watermark value the
- * register can hold for each plane is lower than the
- * minimum FIFO size.
- */
- switch (plane_id) {
- case PLANE_CURSOR:
- return 63;
- case PLANE_PRIMARY:
- return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
- case PLANE_SPRITE0:
- return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
- default:
- MISSING_CASE(plane_id);
- return 0;
- }
-}
-
-static int g4x_fbc_fifo_size(int level)
-{
- switch (level) {
- case G4X_WM_LEVEL_SR:
- return 7;
- case G4X_WM_LEVEL_HPLL:
- return 15;
- default:
- MISSING_CASE(level);
- return 0;
- }
-}
-
-static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int level)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_display_mode *pipe_mode =
- &crtc_state->hw.pipe_mode;
- unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10;
- unsigned int pixel_rate, htotal, cpp, width, wm;
-
- if (latency == 0)
- return USHRT_MAX;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- return 0;
-
- cpp = plane_state->hw.fb->format->cpp[0];
-
- /*
- * WaUse32BppForSRWM:ctg,elk
- *
- * The spec fails to list this restriction for the
- * HPLL watermark, which seems a little strange.
- * Let's use 32bpp for the HPLL watermark as well.
- */
- if (plane->id == PLANE_PRIMARY &&
- level != G4X_WM_LEVEL_NORMAL)
- cpp = max(cpp, 4u);
-
- pixel_rate = crtc_state->pixel_rate;
- htotal = pipe_mode->crtc_htotal;
- width = drm_rect_width(&plane_state->uapi.src) >> 16;
-
- if (plane->id == PLANE_CURSOR) {
- wm = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);
- } else if (plane->id == PLANE_PRIMARY &&
- level == G4X_WM_LEVEL_NORMAL) {
- wm = intel_wm_method1(pixel_rate, cpp, latency);
- } else {
- unsigned int small, large;
-
- small = intel_wm_method1(pixel_rate, cpp, latency);
- large = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);
-
- wm = min(small, large);
- }
-
- wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
- width, cpp);
-
- wm = DIV_ROUND_UP(wm, 64) + 2;
-
- return min_t(unsigned int, wm, USHRT_MAX);
-}
-
-static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
- int level, enum plane_id plane_id, u16 value)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- bool dirty = false;
-
- for (; level < intel_wm_num_levels(dev_priv); level++) {
- struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
-
- dirty |= raw->plane[plane_id] != value;
- raw->plane[plane_id] = value;
- }
-
- return dirty;
-}
-
-static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
- int level, u16 value)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- bool dirty = false;
-
- /* NORMAL level doesn't have an FBC watermark */
- level = max(level, G4X_WM_LEVEL_SR);
-
- for (; level < intel_wm_num_levels(dev_priv); level++) {
- struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
-
- dirty |= raw->fbc != value;
- raw->fbc = value;
- }
-
- return dirty;
-}
-
-static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- u32 pri_val);
-
-static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
- enum plane_id plane_id = plane->id;
- bool dirty = false;
- int level;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state)) {
- dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
- if (plane_id == PLANE_PRIMARY)
- dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
- goto out;
- }
-
- for (level = 0; level < num_levels; level++) {
- struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
- int wm, max_wm;
-
- wm = g4x_compute_wm(crtc_state, plane_state, level);
- max_wm = g4x_plane_fifo_size(plane_id, level);
-
- if (wm > max_wm)
- break;
-
- dirty |= raw->plane[plane_id] != wm;
- raw->plane[plane_id] = wm;
-
- if (plane_id != PLANE_PRIMARY ||
- level == G4X_WM_LEVEL_NORMAL)
- continue;
-
- wm = ilk_compute_fbc_wm(crtc_state, plane_state,
- raw->plane[plane_id]);
- max_wm = g4x_fbc_fifo_size(level);
-
- /*
- * FBC wm is not mandatory as we
- * can always just disable its use.
- */
- if (wm > max_wm)
- wm = USHRT_MAX;
-
- dirty |= raw->fbc != wm;
- raw->fbc = wm;
- }
-
-	/* mark all higher levels as invalid */
- dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
-
- if (plane_id == PLANE_PRIMARY)
- dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
-
- out:
- if (dirty) {
- drm_dbg_kms(&dev_priv->drm,
- "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
- plane->base.name,
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
-
- if (plane_id == PLANE_PRIMARY)
- drm_dbg_kms(&dev_priv->drm,
- "FBC watermarks: SR=%d, HPLL=%d\n",
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
- }
-
- return dirty;
-}
-
-static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
- enum plane_id plane_id, int level)
-{
- const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
-
- return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
-}
-
-static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
- int level)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-
- if (level > dev_priv->display.wm.max_level)
- return false;
-
- return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
- g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
- g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
-}
-
-/* mark all levels starting from 'level' as invalid */
-static void g4x_invalidate_wms(struct intel_crtc *crtc,
- struct g4x_wm_state *wm_state, int level)
-{
- if (level <= G4X_WM_LEVEL_NORMAL) {
- enum plane_id plane_id;
-
- for_each_plane_id_on_crtc(crtc, plane_id)
- wm_state->wm.plane[plane_id] = USHRT_MAX;
- }
-
- if (level <= G4X_WM_LEVEL_SR) {
- wm_state->cxsr = false;
- wm_state->sr.cursor = USHRT_MAX;
- wm_state->sr.plane = USHRT_MAX;
- wm_state->sr.fbc = USHRT_MAX;
- }
-
- if (level <= G4X_WM_LEVEL_HPLL) {
- wm_state->hpll_en = false;
- wm_state->hpll.cursor = USHRT_MAX;
- wm_state->hpll.plane = USHRT_MAX;
- wm_state->hpll.fbc = USHRT_MAX;
- }
-}
-
-static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state,
- int level)
-{
- if (level < G4X_WM_LEVEL_SR)
- return false;
-
- if (level >= G4X_WM_LEVEL_SR &&
- wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
- return false;
-
- if (level >= G4X_WM_LEVEL_HPLL &&
- wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
- return false;
-
- return true;
-}
-
-static int _g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
- u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- const struct g4x_pipe_wm *raw;
- enum plane_id plane_id;
- int level;
-
- level = G4X_WM_LEVEL_NORMAL;
- if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
- goto out;
-
- raw = &crtc_state->wm.g4x.raw[level];
- for_each_plane_id_on_crtc(crtc, plane_id)
- wm_state->wm.plane[plane_id] = raw->plane[plane_id];
-
- level = G4X_WM_LEVEL_SR;
- if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
- goto out;
-
- raw = &crtc_state->wm.g4x.raw[level];
- wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
- wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
- wm_state->sr.fbc = raw->fbc;
-
- wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY);
-
- level = G4X_WM_LEVEL_HPLL;
- if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
- goto out;
-
- raw = &crtc_state->wm.g4x.raw[level];
- wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
- wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
- wm_state->hpll.fbc = raw->fbc;
-
- wm_state->hpll_en = wm_state->cxsr;
-
- level++;
-
- out:
- if (level == G4X_WM_LEVEL_NORMAL)
- return -EINVAL;
-
- /* invalidate the higher levels */
- g4x_invalidate_wms(crtc, wm_state, level);
-
- /*
-	 * Determine if the FBC watermark(s) can be used. If
-	 * this isn't the case, we prefer to disable the FBC
- * watermark(s) rather than disable the SR/HPLL
- * level(s) entirely. 'level-1' is the highest valid
- * level here.
- */
- wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1);
-
- return 0;
-}
-
-static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_plane_state *old_plane_state;
- const struct intel_plane_state *new_plane_state;
- struct intel_plane *plane;
- unsigned int dirty = 0;
- int i;
-
- for_each_oldnew_intel_plane_in_state(state, plane,
- old_plane_state,
- new_plane_state, i) {
- if (new_plane_state->hw.crtc != &crtc->base &&
- old_plane_state->hw.crtc != &crtc->base)
- continue;
-
- if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
- dirty |= BIT(plane->id);
- }
-
- if (!dirty)
- return 0;
-
- return _g4x_compute_pipe_wm(crtc_state);
-}
-
-static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
- const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
- const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
- enum plane_id plane_id;
-
- if (!new_crtc_state->hw.active ||
- intel_crtc_needs_modeset(new_crtc_state)) {
- *intermediate = *optimal;
-
- intermediate->cxsr = false;
- intermediate->hpll_en = false;
- goto out;
- }
-
- intermediate->cxsr = optimal->cxsr && active->cxsr &&
- !new_crtc_state->disable_cxsr;
- intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
- !new_crtc_state->disable_cxsr;
- intermediate->fbc_en = optimal->fbc_en && active->fbc_en;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- intermediate->wm.plane[plane_id] =
- max(optimal->wm.plane[plane_id],
- active->wm.plane[plane_id]);
-
- drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
- g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
- }
-
- intermediate->sr.plane = max(optimal->sr.plane,
- active->sr.plane);
- intermediate->sr.cursor = max(optimal->sr.cursor,
- active->sr.cursor);
- intermediate->sr.fbc = max(optimal->sr.fbc,
- active->sr.fbc);
-
- intermediate->hpll.plane = max(optimal->hpll.plane,
- active->hpll.plane);
- intermediate->hpll.cursor = max(optimal->hpll.cursor,
- active->hpll.cursor);
- intermediate->hpll.fbc = max(optimal->hpll.fbc,
- active->hpll.fbc);
-
- drm_WARN_ON(&dev_priv->drm,
- (intermediate->sr.plane >
- g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
- intermediate->sr.cursor >
- g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
- intermediate->cxsr);
- drm_WARN_ON(&dev_priv->drm,
- (intermediate->sr.plane >
- g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
- intermediate->sr.cursor >
- g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
- intermediate->hpll_en);
-
- drm_WARN_ON(&dev_priv->drm,
- intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
- intermediate->fbc_en && intermediate->cxsr);
- drm_WARN_ON(&dev_priv->drm,
- intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
- intermediate->fbc_en && intermediate->hpll_en);
-
-out:
- /*
-	 * If our intermediate WM is identical to the final WM, then we can
- * omit the post-vblank programming; only update if it's different.
- */
- if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
- new_crtc_state->wm.need_postvbl_update = true;
-
- return 0;
-}
-
-static void g4x_merge_wm(struct drm_i915_private *dev_priv,
- struct g4x_wm_values *wm)
-{
- struct intel_crtc *crtc;
- int num_active_pipes = 0;
-
- wm->cxsr = true;
- wm->hpll_en = true;
- wm->fbc_en = true;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
-
- if (!crtc->active)
- continue;
-
- if (!wm_state->cxsr)
- wm->cxsr = false;
- if (!wm_state->hpll_en)
- wm->hpll_en = false;
- if (!wm_state->fbc_en)
- wm->fbc_en = false;
-
- num_active_pipes++;
- }
-
- if (num_active_pipes != 1) {
- wm->cxsr = false;
- wm->hpll_en = false;
- wm->fbc_en = false;
- }
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
- enum pipe pipe = crtc->pipe;
-
- wm->pipe[pipe] = wm_state->wm;
- if (crtc->active && wm->cxsr)
- wm->sr = wm_state->sr;
- if (crtc->active && wm->hpll_en)
- wm->hpll = wm_state->hpll;
- }
-}
-
-static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
-{
- struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x;
- struct g4x_wm_values new_wm = {};
-
- g4x_merge_wm(dev_priv, &new_wm);
-
- if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
- return;
-
- if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, false);
-
- g4x_write_wm_values(dev_priv, &new_wm);
-
- if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, true);
-
- *old_wm = new_wm;
-}
-
-static void g4x_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
- g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void g4x_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- if (!crtc_state->wm.need_postvbl_update)
- return;
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
- g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-/* latency must be in 0.1us units. */
-static unsigned int vlv_wm_method2(unsigned int pixel_rate,
- unsigned int htotal,
- unsigned int width,
- unsigned int cpp,
- unsigned int latency)
-{
- unsigned int ret;
-
- ret = intel_wm_method2(pixel_rate, htotal,
- width, cpp, latency);
- ret = DIV_ROUND_UP(ret, 64);
-
- return ret;
-}
-
-static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
-{
- /* all latencies in usec */
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
-
- dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM2;
-
- if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
-
- dev_priv->display.wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
- }
-}
-
-static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int level)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_display_mode *pipe_mode =
- &crtc_state->hw.pipe_mode;
- unsigned int pixel_rate, htotal, cpp, width, wm;
-
- if (dev_priv->display.wm.pri_latency[level] == 0)
- return USHRT_MAX;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- return 0;
-
- cpp = plane_state->hw.fb->format->cpp[0];
- pixel_rate = crtc_state->pixel_rate;
- htotal = pipe_mode->crtc_htotal;
- width = drm_rect_width(&plane_state->uapi.src) >> 16;
-
- if (plane->id == PLANE_CURSOR) {
- /*
- * FIXME the formula gives values that are
- * too big for the cursor FIFO, and hence we
- * would never be able to use cursors. For
- * now just hardcode the watermark.
- */
- wm = 63;
- } else {
- wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
- dev_priv->display.wm.pri_latency[level] * 10);
- }
-
- return min_t(unsigned int, wm, USHRT_MAX);
-}
-
-static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
-{
- return (active_planes & (BIT(PLANE_SPRITE0) |
- BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
-}
-
-static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct g4x_pipe_wm *raw =
- &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
- struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
- u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- int num_active_planes = hweight8(active_planes);
- const int fifo_size = 511;
- int fifo_extra, fifo_left = fifo_size;
- int sprite0_fifo_extra = 0;
- unsigned int total_rate;
- enum plane_id plane_id;
-
- /*
- * When enabling sprite0 after sprite1 has already been enabled
- * we tend to get an underrun unless sprite0 already has some
-	 * FIFO space allocated. Hence we always allocate at least one
- * cacheline for sprite0 whenever sprite1 is enabled.
- *
- * All other plane enable sequences appear immune to this problem.
- */
- if (vlv_need_sprite0_fifo_workaround(active_planes))
- sprite0_fifo_extra = 1;
-
- total_rate = raw->plane[PLANE_PRIMARY] +
- raw->plane[PLANE_SPRITE0] +
- raw->plane[PLANE_SPRITE1] +
- sprite0_fifo_extra;
-
- if (total_rate > fifo_size)
- return -EINVAL;
-
- if (total_rate == 0)
- total_rate = 1;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- unsigned int rate;
-
- if ((active_planes & BIT(plane_id)) == 0) {
- fifo_state->plane[plane_id] = 0;
- continue;
- }
-
- rate = raw->plane[plane_id];
- fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
- fifo_left -= fifo_state->plane[plane_id];
- }
-
- fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
- fifo_left -= sprite0_fifo_extra;
-
- fifo_state->plane[PLANE_CURSOR] = 63;
-
- fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);
-
- /* spread the remainder evenly */
- for_each_plane_id_on_crtc(crtc, plane_id) {
- int plane_extra;
-
- if (fifo_left == 0)
- break;
-
- if ((active_planes & BIT(plane_id)) == 0)
- continue;
-
- plane_extra = min(fifo_extra, fifo_left);
- fifo_state->plane[plane_id] += plane_extra;
- fifo_left -= plane_extra;
- }
-
- drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);
-
- /* give it all to the first plane if none are active */
- if (active_planes == 0) {
- drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
- fifo_state->plane[PLANE_PRIMARY] = fifo_left;
- }
-
- return 0;
-}
-
-/* mark all levels starting from 'level' as invalid */
-static void vlv_invalidate_wms(struct intel_crtc *crtc,
- struct vlv_wm_state *wm_state, int level)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- for (; level < intel_wm_num_levels(dev_priv); level++) {
- enum plane_id plane_id;
-
- for_each_plane_id_on_crtc(crtc, plane_id)
- wm_state->wm[level].plane[plane_id] = USHRT_MAX;
-
- wm_state->sr[level].cursor = USHRT_MAX;
- wm_state->sr[level].plane = USHRT_MAX;
- }
-}
-
-static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
-{
- if (wm > fifo_size)
- return USHRT_MAX;
- else
- return fifo_size - wm;
-}
-
-/*
- * Starting from 'level' set all higher
- * levels to 'value' in the "raw" watermarks.
- */
-static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
- int level, enum plane_id plane_id, u16 value)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int num_levels = intel_wm_num_levels(dev_priv);
- bool dirty = false;
-
- for (; level < num_levels; level++) {
- struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
-
- dirty |= raw->plane[plane_id] != value;
- raw->plane[plane_id] = value;
- }
-
- return dirty;
-}
-
-static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- enum plane_id plane_id = plane->id;
- int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
- int level;
- bool dirty = false;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state)) {
- dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
- goto out;
- }
-
- for (level = 0; level < num_levels; level++) {
- struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
- int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
- int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
-
- if (wm > max_wm)
- break;
-
- dirty |= raw->plane[plane_id] != wm;
- raw->plane[plane_id] = wm;
- }
-
- /* mark all higher levels as invalid */
- dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
-
-out:
- if (dirty)
- drm_dbg_kms(&dev_priv->drm,
- "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
- plane->base.name,
- crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
- crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
- crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
-
- return dirty;
-}
-
-static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
- enum plane_id plane_id, int level)
-{
- const struct g4x_pipe_wm *raw =
- &crtc_state->wm.vlv.raw[level];
- const struct vlv_fifo_state *fifo_state =
- &crtc_state->wm.vlv.fifo_state;
-
- return raw->plane[plane_id] <= fifo_state->plane[plane_id];
-}
-
-static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
-{
- return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
- vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
- vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
- vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
-}
-
-static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
- const struct vlv_fifo_state *fifo_state =
- &crtc_state->wm.vlv.fifo_state;
- u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- int num_active_planes = hweight8(active_planes);
- enum plane_id plane_id;
- int level;
-
- /* initially allow all levels */
- wm_state->num_levels = intel_wm_num_levels(dev_priv);
- /*
- * Note that enabling cxsr with no primary/sprite planes
- * enabled can wedge the pipe. Hence we only allow cxsr
- * with exactly one enabled primary/sprite plane.
- */
- wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;
-
- for (level = 0; level < wm_state->num_levels; level++) {
- const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
- const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
-
- if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
- break;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- wm_state->wm[level].plane[plane_id] =
- vlv_invert_wm_value(raw->plane[plane_id],
- fifo_state->plane[plane_id]);
- }
-
- wm_state->sr[level].plane =
- vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
- raw->plane[PLANE_SPRITE0],
- raw->plane[PLANE_SPRITE1]),
- sr_fifo_size);
-
- wm_state->sr[level].cursor =
- vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
- 63);
- }
-
- if (level == 0)
- return -EINVAL;
-
- /* limit to only levels we can actually handle */
- wm_state->num_levels = level;
-
- /* invalidate the higher levels */
- vlv_invalidate_wms(crtc, wm_state, level);
-
- return 0;
-}
-
-static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_plane_state *old_plane_state;
- const struct intel_plane_state *new_plane_state;
- struct intel_plane *plane;
- unsigned int dirty = 0;
- int i;
-
- for_each_oldnew_intel_plane_in_state(state, plane,
- old_plane_state,
- new_plane_state, i) {
- if (new_plane_state->hw.crtc != &crtc->base &&
- old_plane_state->hw.crtc != &crtc->base)
- continue;
-
- if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
- dirty |= BIT(plane->id);
- }
-
- /*
- * DSPARB registers may have been reset due to the
- * power well being turned off. Make sure we restore
- * them to a consistent state even if no primary/sprite
- * planes are initially active. We also force a FIFO
- * recomputation so that we are sure to sanitize the
- * FIFO setting we took over from the BIOS even if there
- * are no active planes on the crtc.
- */
- if (intel_crtc_needs_modeset(crtc_state))
- dirty = ~0;
-
- if (!dirty)
- return 0;
-
- /* cursor changes don't warrant a FIFO recompute */
- if (dirty & ~BIT(PLANE_CURSOR)) {
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- const struct vlv_fifo_state *old_fifo_state =
- &old_crtc_state->wm.vlv.fifo_state;
- const struct vlv_fifo_state *new_fifo_state =
- &crtc_state->wm.vlv.fifo_state;
- int ret;
-
- ret = vlv_compute_fifo(crtc_state);
- if (ret)
- return ret;
-
- if (intel_crtc_needs_modeset(crtc_state) ||
- memcmp(old_fifo_state, new_fifo_state,
- sizeof(*new_fifo_state)) != 0)
- crtc_state->fifo_changed = true;
- }
-
- return _vlv_compute_pipe_wm(crtc_state);
-}
-
-#define VLV_FIFO(plane, value) \
- (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
-
-static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_uncore *uncore = &dev_priv->uncore;
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct vlv_fifo_state *fifo_state =
- &crtc_state->wm.vlv.fifo_state;
- int sprite0_start, sprite1_start, fifo_size;
- u32 dsparb, dsparb2, dsparb3;
-
- if (!crtc_state->fifo_changed)
- return;
-
- sprite0_start = fifo_state->plane[PLANE_PRIMARY];
- sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
- fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
-
- drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
- drm_WARN_ON(&dev_priv->drm, fifo_size != 511);
-
- trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
-
- /*
- * uncore.lock serves a double purpose here. It allows us to
- * use the less expensive I915_{READ,WRITE}_FW() functions, and
- * it protects the DSPARB registers from getting clobbered by
- * parallel updates from multiple pipes.
- *
- * intel_pipe_update_start() has already disabled interrupts
- * for us, so a plain spin_lock() is sufficient here.
- */
- spin_lock(&uncore->lock);
-
- switch (crtc->pipe) {
- case PIPE_A:
- dsparb = intel_uncore_read_fw(uncore, DSPARB);
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
-
- dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
- VLV_FIFO(SPRITEB, 0xff));
- dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
- VLV_FIFO(SPRITEB, sprite1_start));
-
- dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
- VLV_FIFO(SPRITEB_HI, 0x1));
- dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
- VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
-
- intel_uncore_write_fw(uncore, DSPARB, dsparb);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
- break;
- case PIPE_B:
- dsparb = intel_uncore_read_fw(uncore, DSPARB);
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
-
- dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
- VLV_FIFO(SPRITED, 0xff));
- dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
- VLV_FIFO(SPRITED, sprite1_start));
-
- dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
- VLV_FIFO(SPRITED_HI, 0xff));
- dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
- VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
-
- intel_uncore_write_fw(uncore, DSPARB, dsparb);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
- break;
- case PIPE_C:
- dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
-
- dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
- VLV_FIFO(SPRITEF, 0xff));
- dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
- VLV_FIFO(SPRITEF, sprite1_start));
-
- dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
- VLV_FIFO(SPRITEF_HI, 0xff));
- dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
- VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
-
- intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
- break;
- default:
- break;
- }
-
- intel_uncore_posting_read_fw(uncore, DSPARB);
-
- spin_unlock(&uncore->lock);
-}
-
-#undef VLV_FIFO
-
-static int vlv_compute_intermediate_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
- const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
- const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
- int level;
-
- if (!new_crtc_state->hw.active ||
- intel_crtc_needs_modeset(new_crtc_state)) {
- *intermediate = *optimal;
-
- intermediate->cxsr = false;
- goto out;
- }
-
- intermediate->num_levels = min(optimal->num_levels, active->num_levels);
- intermediate->cxsr = optimal->cxsr && active->cxsr &&
- !new_crtc_state->disable_cxsr;
-
- for (level = 0; level < intermediate->num_levels; level++) {
- enum plane_id plane_id;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- intermediate->wm[level].plane[plane_id] =
- min(optimal->wm[level].plane[plane_id],
- active->wm[level].plane[plane_id]);
- }
-
- intermediate->sr[level].plane = min(optimal->sr[level].plane,
- active->sr[level].plane);
- intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
- active->sr[level].cursor);
- }
-
- vlv_invalidate_wms(crtc, intermediate, level);
-
-out:
- /*
-	 * If our intermediate WM is identical to the final WM, then we can
- * omit the post-vblank programming; only update if it's different.
- */
- if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
- new_crtc_state->wm.need_postvbl_update = true;
-
- return 0;
-}
-
-static void vlv_merge_wm(struct drm_i915_private *dev_priv,
- struct vlv_wm_values *wm)
-{
- struct intel_crtc *crtc;
- int num_active_pipes = 0;
-
- wm->level = dev_priv->display.wm.max_level;
- wm->cxsr = true;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
-
- if (!crtc->active)
- continue;
-
- if (!wm_state->cxsr)
- wm->cxsr = false;
-
- num_active_pipes++;
- wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
- }
-
- if (num_active_pipes != 1)
- wm->cxsr = false;
-
- if (num_active_pipes > 1)
- wm->level = VLV_WM_LEVEL_PM2;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
- enum pipe pipe = crtc->pipe;
-
- wm->pipe[pipe] = wm_state->wm[wm->level];
- if (crtc->active && wm->cxsr)
- wm->sr = wm_state->sr[wm->level];
-
- wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
- wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
- wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
- wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
- }
-}
-
-static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
-{
- struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv;
- struct vlv_wm_values new_wm = {};
-
- vlv_merge_wm(dev_priv, &new_wm);
-
- if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
- return;
-
- if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
- chv_set_memory_dvfs(dev_priv, false);
-
- if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
- chv_set_memory_pm5(dev_priv, false);
-
- if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, false);
-
- vlv_write_wm_values(dev_priv, &new_wm);
-
- if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, true);
-
- if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
- chv_set_memory_pm5(dev_priv, true);
-
- if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
- chv_set_memory_dvfs(dev_priv, true);
-
- *old_wm = new_wm;
-}
-
-static void vlv_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
- vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void vlv_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- if (!crtc_state->wm.need_postvbl_update)
- return;
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
- vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void i965_update_wm(struct drm_i915_private *dev_priv)
-{
- struct intel_crtc *crtc;
- int srwm = 1;
- int cursor_sr = 16;
- bool cxsr_enabled;
-
-	/* Calc sr entries for single-plane configs */
- crtc = single_enabled_crtc(dev_priv);
- if (crtc) {
- /* self-refresh has much higher latency */
- static const int sr_latency_ns = 12000;
- const struct drm_display_mode *pipe_mode =
- &crtc->config->hw.pipe_mode;
- const struct drm_framebuffer *fb =
- crtc->base.primary->state->fb;
- int pixel_rate = crtc->config->pixel_rate;
- int htotal = pipe_mode->crtc_htotal;
- int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
- int cpp = fb->format->cpp[0];
- int entries;
-
- entries = intel_wm_method2(pixel_rate, htotal,
- width, cpp, sr_latency_ns / 100);
- entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
- srwm = I965_FIFO_SIZE - entries;
- if (srwm < 0)
- srwm = 1;
- srwm &= 0x1ff;
- drm_dbg_kms(&dev_priv->drm,
- "self-refresh entries: %d, wm: %d\n",
- entries, srwm);
-
- entries = intel_wm_method2(pixel_rate, htotal,
- crtc->base.cursor->state->crtc_w, 4,
- sr_latency_ns / 100);
- entries = DIV_ROUND_UP(entries,
- i965_cursor_wm_info.cacheline_size) +
- i965_cursor_wm_info.guard_size;
-
- cursor_sr = i965_cursor_wm_info.fifo_size - entries;
- if (cursor_sr > i965_cursor_wm_info.max_wm)
- cursor_sr = i965_cursor_wm_info.max_wm;
-
- drm_dbg_kms(&dev_priv->drm,
- "self-refresh watermark: display plane %d "
- "cursor %d\n", srwm, cursor_sr);
-
- cxsr_enabled = true;
- } else {
- cxsr_enabled = false;
- /* Turn off self refresh if both pipes are enabled */
- intel_set_memory_cxsr(dev_priv, false);
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
- srwm);
-
- /* 965 has limitations... */
- intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) |
- FW_WM(8, CURSORB) |
- FW_WM(8, PLANEB) |
- FW_WM(8, PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) |
- FW_WM(8, PLANEC_OLD));
- /* update cursor SR watermark */
- intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
-
- if (cxsr_enabled)
- intel_set_memory_cxsr(dev_priv, true);
-}
-
-#undef FW_WM
-
-static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
- enum i9xx_plane_id i9xx_plane)
-{
- struct intel_plane *plane;
-
- for_each_intel_plane(&i915->drm, plane) {
- if (plane->id == PLANE_PRIMARY &&
- plane->i9xx_plane == i9xx_plane)
- return intel_crtc_for_pipe(i915, plane->pipe);
- }
-
- return NULL;
-}
-
-static void i9xx_update_wm(struct drm_i915_private *dev_priv)
-{
- const struct intel_watermark_params *wm_info;
- u32 fwater_lo;
- u32 fwater_hi;
- int cwm, srwm = 1;
- int fifo_size;
- int planea_wm, planeb_wm;
- struct intel_crtc *crtc;
-
- if (IS_I945GM(dev_priv))
- wm_info = &i945_wm_info;
- else if (DISPLAY_VER(dev_priv) != 2)
- wm_info = &i915_wm_info;
- else
- wm_info = &i830_a_wm_info;
-
- if (DISPLAY_VER(dev_priv) == 2)
- fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
- else
- fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
- crtc = intel_crtc_for_plane(dev_priv, PLANE_A);
- if (intel_crtc_active(crtc)) {
- const struct drm_framebuffer *fb =
- crtc->base.primary->state->fb;
- int cpp;
-
- if (DISPLAY_VER(dev_priv) == 2)
- cpp = 4;
- else
- cpp = fb->format->cpp[0];
-
- planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
- wm_info, fifo_size, cpp,
- pessimal_latency_ns);
- } else {
- planea_wm = fifo_size - wm_info->guard_size;
- if (planea_wm > (long)wm_info->max_wm)
- planea_wm = wm_info->max_wm;
- }
-
- if (DISPLAY_VER(dev_priv) == 2)
- wm_info = &i830_bc_wm_info;
-
- if (DISPLAY_VER(dev_priv) == 2)
- fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
- else
- fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
- crtc = intel_crtc_for_plane(dev_priv, PLANE_B);
- if (intel_crtc_active(crtc)) {
- const struct drm_framebuffer *fb =
- crtc->base.primary->state->fb;
- int cpp;
-
- if (DISPLAY_VER(dev_priv) == 2)
- cpp = 4;
- else
- cpp = fb->format->cpp[0];
-
- planeb_wm = intel_calculate_wm(crtc->config->pixel_rate,
- wm_info, fifo_size, cpp,
- pessimal_latency_ns);
- } else {
- planeb_wm = fifo_size - wm_info->guard_size;
- if (planeb_wm > (long)wm_info->max_wm)
- planeb_wm = wm_info->max_wm;
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
-
- crtc = single_enabled_crtc(dev_priv);
- if (IS_I915GM(dev_priv) && crtc) {
- struct drm_i915_gem_object *obj;
-
- obj = intel_fb_obj(crtc->base.primary->state->fb);
-
- /* self-refresh seems busted with untiled */
- if (!i915_gem_object_is_tiled(obj))
- crtc = NULL;
- }
-
- /*
- * Overlay gets an aggressive default since video jitter is bad.
- */
- cwm = 2;
-
- /* Play safe and disable self-refresh before adjusting watermarks. */
- intel_set_memory_cxsr(dev_priv, false);
-
-	/* Calc sr entries for single-plane configs */
- if (HAS_FW_BLC(dev_priv) && crtc) {
- /* self-refresh has much higher latency */
- static const int sr_latency_ns = 6000;
- const struct drm_display_mode *pipe_mode =
- &crtc->config->hw.pipe_mode;
- const struct drm_framebuffer *fb =
- crtc->base.primary->state->fb;
- int pixel_rate = crtc->config->pixel_rate;
- int htotal = pipe_mode->crtc_htotal;
- int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
- int cpp;
- int entries;
-
- if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
- cpp = 4;
- else
- cpp = fb->format->cpp[0];
-
- entries = intel_wm_method2(pixel_rate, htotal, width, cpp,
- sr_latency_ns / 100);
- entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
- drm_dbg_kms(&dev_priv->drm,
- "self-refresh entries: %d\n", entries);
- srwm = wm_info->fifo_size - entries;
- if (srwm < 0)
- srwm = 1;
-
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
- FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
- else
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
- planea_wm, planeb_wm, cwm, srwm);
-
- fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
- fwater_hi = (cwm & 0x1f);
-
- /* Set request length to 8 cachelines per fetch */
- fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
- fwater_hi = fwater_hi | (1 << 8);
-
- intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
- intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);
-
- if (crtc)
- intel_set_memory_cxsr(dev_priv, true);
-}
-
-static void i845_update_wm(struct drm_i915_private *dev_priv)
-{
- struct intel_crtc *crtc;
- u32 fwater_lo;
- int planea_wm;
-
- crtc = single_enabled_crtc(dev_priv);
- if (crtc == NULL)
- return;
-
- planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
- &i845_wm_info,
- i845_get_fifo_size(dev_priv, PLANE_A),
- 4, pessimal_latency_ns);
- fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
- fwater_lo |= (3<<8) | planea_wm;
-
- drm_dbg_kms(&dev_priv->drm,
- "Setting FIFO watermarks - A: %d\n", planea_wm);
-
- intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
-}
-
-/* latency must be in 0.1us units. */
-static unsigned int ilk_wm_method1(unsigned int pixel_rate,
- unsigned int cpp,
- unsigned int latency)
-{
- unsigned int ret;
-
- ret = intel_wm_method1(pixel_rate, cpp, latency);
- ret = DIV_ROUND_UP(ret, 64) + 2;
-
- return ret;
-}
-
-/* latency must be in 0.1us units. */
-static unsigned int ilk_wm_method2(unsigned int pixel_rate,
- unsigned int htotal,
- unsigned int width,
- unsigned int cpp,
- unsigned int latency)
-{
- unsigned int ret;
-
- ret = intel_wm_method2(pixel_rate, htotal,
- width, cpp, latency);
- ret = DIV_ROUND_UP(ret, 64) + 2;
-
- return ret;
-}
-
-static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
-{
- /*
- * Neither of these should be possible since this function shouldn't be
- * called if the CRTC is off or the plane is invisible. But let's be
- * extra paranoid to avoid a potential divide-by-zero if we screw up
- * elsewhere in the driver.
- */
- if (WARN_ON(!cpp))
- return 0;
- if (WARN_ON(!horiz_pixels))
- return 0;
-
- return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
-}
-
-struct ilk_wm_maximums {
- u16 pri;
- u16 spr;
- u16 cur;
- u16 fbc;
-};
-
-/*
- * For both WM_PIPE and WM_LP.
- * mem_value must be in 0.1us units.
- */
-static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- u32 mem_value, bool is_lp)
-{
- u32 method1, method2;
- int cpp;
-
- if (mem_value == 0)
- return U32_MAX;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- return 0;
-
- cpp = plane_state->hw.fb->format->cpp[0];
-
- method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
-
- if (!is_lp)
- return method1;
-
- method2 = ilk_wm_method2(crtc_state->pixel_rate,
- crtc_state->hw.pipe_mode.crtc_htotal,
- drm_rect_width(&plane_state->uapi.src) >> 16,
- cpp, mem_value);
-
- return min(method1, method2);
-}
-
-/*
- * For both WM_PIPE and WM_LP.
- * mem_value must be in 0.1us units.
- */
-static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- u32 mem_value)
-{
- u32 method1, method2;
- int cpp;
-
- if (mem_value == 0)
- return U32_MAX;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- return 0;
-
- cpp = plane_state->hw.fb->format->cpp[0];
-
- method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
- method2 = ilk_wm_method2(crtc_state->pixel_rate,
- crtc_state->hw.pipe_mode.crtc_htotal,
- drm_rect_width(&plane_state->uapi.src) >> 16,
- cpp, mem_value);
- return min(method1, method2);
-}
-
-/*
- * For both WM_PIPE and WM_LP.
- * mem_value must be in 0.1us units.
- */
-static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- u32 mem_value)
-{
- int cpp;
-
- if (mem_value == 0)
- return U32_MAX;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- return 0;
-
- cpp = plane_state->hw.fb->format->cpp[0];
-
- return ilk_wm_method2(crtc_state->pixel_rate,
- crtc_state->hw.pipe_mode.crtc_htotal,
- drm_rect_width(&plane_state->uapi.src) >> 16,
- cpp, mem_value);
-}
-
-/* Only for WM_LP. */
-static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- u32 pri_val)
-{
- int cpp;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- return 0;
-
- cpp = plane_state->hw.fb->format->cpp[0];
-
- return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.src) >> 16,
- cpp);
-}
-
-static unsigned int
-ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 8)
- return 3072;
- else if (DISPLAY_VER(dev_priv) >= 7)
- return 768;
- else
- return 512;
-}
-
-static unsigned int
-ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
- int level, bool is_sprite)
-{
- if (DISPLAY_VER(dev_priv) >= 8)
- /* BDW primary/sprite plane watermarks */
- return level == 0 ? 255 : 2047;
- else if (DISPLAY_VER(dev_priv) >= 7)
- /* IVB/HSW primary/sprite plane watermarks */
- return level == 0 ? 127 : 1023;
- else if (!is_sprite)
- /* ILK/SNB primary plane watermarks */
- return level == 0 ? 127 : 511;
- else
- /* ILK/SNB sprite plane watermarks */
- return level == 0 ? 63 : 255;
-}
-
-static unsigned int
-ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
-{
- if (DISPLAY_VER(dev_priv) >= 7)
- return level == 0 ? 63 : 255;
- else
- return level == 0 ? 31 : 63;
-}
-
-static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 8)
- return 31;
- else
- return 15;
-}
-
-/* Calculate the maximum primary/sprite plane watermark */
-static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
- int level,
- const struct intel_wm_config *config,
- enum intel_ddb_partitioning ddb_partitioning,
- bool is_sprite)
-{
- unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
-
- /* if sprites aren't enabled, sprites get nothing */
- if (is_sprite && !config->sprites_enabled)
- return 0;
-
- /* HSW allows LP1+ watermarks even with multiple pipes */
- if (level == 0 || config->num_pipes_active > 1) {
- fifo_size /= INTEL_NUM_PIPES(dev_priv);
-
- /*
- * For some reason the non self refresh
- * FIFO size is only half of the self
- * refresh FIFO size on ILK/SNB.
- */
- if (DISPLAY_VER(dev_priv) <= 6)
- fifo_size /= 2;
- }
-
- if (config->sprites_enabled) {
- /* level 0 is always calculated with 1:1 split */
- if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
- if (is_sprite)
- fifo_size *= 5;
- fifo_size /= 6;
- } else {
- fifo_size /= 2;
- }
- }
-
- /* clamp to max that the registers can hold */
- return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
-}
-
-/* Calculate the maximum cursor plane watermark */
-static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
- int level,
- const struct intel_wm_config *config)
-{
- /* HSW LP1+ watermarks w/ multiple pipes */
- if (level > 0 && config->num_pipes_active > 1)
- return 64;
-
- /* otherwise just report max that registers can hold */
- return ilk_cursor_wm_reg_max(dev_priv, level);
-}
-
-static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
- int level,
- const struct intel_wm_config *config,
- enum intel_ddb_partitioning ddb_partitioning,
- struct ilk_wm_maximums *max)
-{
- max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
- max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
- max->cur = ilk_cursor_wm_max(dev_priv, level, config);
- max->fbc = ilk_fbc_wm_reg_max(dev_priv);
-}
-
-static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
- int level,
- struct ilk_wm_maximums *max)
-{
- max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
- max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
- max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
- max->fbc = ilk_fbc_wm_reg_max(dev_priv);
-}
-
-static bool ilk_validate_wm_level(int level,
- const struct ilk_wm_maximums *max,
- struct intel_wm_level *result)
-{
- bool ret;
-
- /* already determined to be invalid? */
- if (!result->enable)
- return false;
-
- result->enable = result->pri_val <= max->pri &&
- result->spr_val <= max->spr &&
- result->cur_val <= max->cur;
-
- ret = result->enable;
-
- /*
- * HACK until we can pre-compute everything,
- * and thus fail gracefully if LP0 watermarks
- * are exceeded...
- */
- if (level == 0 && !result->enable) {
- if (result->pri_val > max->pri)
- DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
- level, result->pri_val, max->pri);
- if (result->spr_val > max->spr)
- DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
- level, result->spr_val, max->spr);
- if (result->cur_val > max->cur)
- DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
- level, result->cur_val, max->cur);
-
- result->pri_val = min_t(u32, result->pri_val, max->pri);
- result->spr_val = min_t(u32, result->spr_val, max->spr);
- result->cur_val = min_t(u32, result->cur_val, max->cur);
- result->enable = true;
- }
-
- return ret;
-}
-
-static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
- const struct intel_crtc *crtc,
- int level,
- struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *pristate,
- const struct intel_plane_state *sprstate,
- const struct intel_plane_state *curstate,
- struct intel_wm_level *result)
-{
- u16 pri_latency = dev_priv->display.wm.pri_latency[level];
- u16 spr_latency = dev_priv->display.wm.spr_latency[level];
- u16 cur_latency = dev_priv->display.wm.cur_latency[level];
-
- /* WM1+ latency values stored in 0.5us units */
- if (level > 0) {
- pri_latency *= 5;
- spr_latency *= 5;
- cur_latency *= 5;
- }
-
- if (pristate) {
- result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
- pri_latency, level);
- result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
- }
-
- if (sprstate)
- result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);
-
- if (curstate)
- result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);
-
- result->enable = true;
-}
-
-static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
-{
- u64 sskpd;
-
- sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD);
-
- wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd);
- if (wm[0] == 0)
- wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd);
- wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd);
- wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd);
- wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd);
- wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
-}
-
-static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
-{
- u32 sskpd;
-
- sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD);
-
- wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd);
- wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd);
- wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd);
- wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
-}
-
-static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
-{
- u32 mltr;
-
- mltr = intel_uncore_read(&i915->uncore, MLTR_ILK);
-
- /* ILK primary LP0 latency is 700 ns */
- wm[0] = 7;
- wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr);
- wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
-}
-
-static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5])
-{
- /* ILK sprite LP0 latency is 1300 ns */
- if (DISPLAY_VER(dev_priv) == 5)
- wm[0] = 13;
-}
-
-static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5])
-{
- /* ILK cursor LP0 latency is 1300 ns */
- if (DISPLAY_VER(dev_priv) == 5)
- wm[0] = 13;
-}
-
-int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
-{
- /* how many WM levels are we expecting */
- if (HAS_HW_SAGV_WM(dev_priv))
- return 5;
- else if (DISPLAY_VER(dev_priv) >= 9)
- return 7;
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- return 4;
- else if (DISPLAY_VER(dev_priv) >= 6)
- return 3;
- else
- return 2;
-}
-
-void intel_print_wm_latency(struct drm_i915_private *dev_priv,
- const char *name, const u16 wm[])
-{
- int level, max_level = ilk_wm_max_level(dev_priv);
-
- for (level = 0; level <= max_level; level++) {
- unsigned int latency = wm[level];
-
- if (latency == 0) {
- drm_dbg_kms(&dev_priv->drm,
- "%s WM%d latency not provided\n",
- name, level);
- continue;
- }
-
- /*
- * - latencies are in us on gen9.
- * - before then, WM1+ latency values are in 0.5us units
- */
- if (DISPLAY_VER(dev_priv) >= 9)
- latency *= 10;
- else if (level > 0)
- latency *= 5;
-
- drm_dbg_kms(&dev_priv->drm,
- "%s WM%d latency %u (%u.%u usec)\n", name, level,
- wm[level], latency / 10, latency % 10);
- }
-}
-
-static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5], u16 min)
-{
- int level, max_level = ilk_wm_max_level(dev_priv);
-
- if (wm[0] >= min)
- return false;
-
- wm[0] = max(wm[0], min);
- for (level = 1; level <= max_level; level++)
- wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
-
- return true;
-}
-
-static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
-{
- bool changed;
-
- /*
-	 * The BIOS-provided WM memory latency values are often
- * inadequate for high resolution displays. Adjust them.
- */
- changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12);
-
- if (!changed)
- return;
-
- drm_dbg_kms(&dev_priv->drm,
- "WM latency values increased to avoid potential underruns\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
-}
-
-static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
-{
- /*
- * On some SNB machines (Thinkpad X220 Tablet at least)
- * LP3 usage can cause vblank interrupts to be lost.
- * The DEIIR bit will go high but it looks like the CPU
- * never gets interrupted.
- *
-	 * It's not clear whether other interrupt sources could
- * be affected or if this is somehow limited to vblank
- * interrupts only. To play it safe we disable LP3
- * watermarks entirely.
- */
- if (dev_priv->display.wm.pri_latency[3] == 0 &&
- dev_priv->display.wm.spr_latency[3] == 0 &&
- dev_priv->display.wm.cur_latency[3] == 0)
- return;
-
- dev_priv->display.wm.pri_latency[3] = 0;
- dev_priv->display.wm.spr_latency[3] = 0;
- dev_priv->display.wm.cur_latency[3] = 0;
-
- drm_dbg_kms(&dev_priv->drm,
- "LP3 watermarks disabled due to potential for lost interrupts\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
-}
-
-static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
-{
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
- else if (DISPLAY_VER(dev_priv) >= 6)
- snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
- else
- ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
-
- memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency,
- sizeof(dev_priv->display.wm.pri_latency));
- memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency,
- sizeof(dev_priv->display.wm.pri_latency));
-
- intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency);
- intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency);
-
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
-
- if (DISPLAY_VER(dev_priv) == 6) {
- snb_wm_latency_quirk(dev_priv);
- snb_wm_lp3_irq_quirk(dev_priv);
- }
-}
-
-static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
- struct intel_pipe_wm *pipe_wm)
-{
- /* LP0 watermark maximums depend on this pipe alone */
- const struct intel_wm_config config = {
- .num_pipes_active = 1,
- .sprites_enabled = pipe_wm->sprites_enabled,
- .sprites_scaled = pipe_wm->sprites_scaled,
- };
- struct ilk_wm_maximums max;
-
- /* LP0 watermarks always use 1/2 DDB partitioning */
- ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
-
- /* At least LP0 must be valid */
- if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
- drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
- return false;
- }
-
- return true;
-}
-
-/* Compute new watermarks for the pipe */
-static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_pipe_wm *pipe_wm;
- struct intel_plane *plane;
- const struct intel_plane_state *plane_state;
- const struct intel_plane_state *pristate = NULL;
- const struct intel_plane_state *sprstate = NULL;
- const struct intel_plane_state *curstate = NULL;
- int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
- struct ilk_wm_maximums max;
-
- pipe_wm = &crtc_state->wm.ilk.optimal;
-
- intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
- if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
- pristate = plane_state;
- else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
- sprstate = plane_state;
- else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
- curstate = plane_state;
- }
-
- pipe_wm->pipe_enabled = crtc_state->hw.active;
- pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
- pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);
-
- usable_level = max_level;
-
- /* ILK/SNB: LP2+ watermarks only w/o sprites */
- if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled)
- usable_level = 1;
-
- /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
- if (pipe_wm->sprites_scaled)
- usable_level = 0;
-
- memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
- ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
- pristate, sprstate, curstate, &pipe_wm->wm[0]);
-
- if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
- return -EINVAL;
-
- ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
-
- for (level = 1; level <= usable_level; level++) {
- struct intel_wm_level *wm = &pipe_wm->wm[level];
-
- ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
- pristate, sprstate, curstate, wm);
-
- /*
- * Disable any watermark level that exceeds the
- * register maximums since such watermarks are
- * always invalid.
- */
- if (!ilk_validate_wm_level(level, &max, wm)) {
- memset(wm, 0, sizeof(*wm));
- break;
- }
- }
-
- return 0;
-}
-
-/*
- * Build a set of 'intermediate' watermark values that satisfy both the old
- * state and the new state. These can be programmed to the hardware
- * immediately.
- */
-static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate;
- const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal;
- int level, max_level = ilk_wm_max_level(dev_priv);
-
- /*
- * Start with the final, target watermarks, then combine with the
- * currently active watermarks to get values that are safe both before
- * and after the vblank.
- */
- *a = new_crtc_state->wm.ilk.optimal;
- if (!new_crtc_state->hw.active ||
- intel_crtc_needs_modeset(new_crtc_state) ||
- state->skip_intermediate_wm)
- return 0;
-
- a->pipe_enabled |= b->pipe_enabled;
- a->sprites_enabled |= b->sprites_enabled;
- a->sprites_scaled |= b->sprites_scaled;
-
- for (level = 0; level <= max_level; level++) {
- struct intel_wm_level *a_wm = &a->wm[level];
- const struct intel_wm_level *b_wm = &b->wm[level];
-
- a_wm->enable &= b_wm->enable;
- a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
- a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
- a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
- a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
- }
-
- /*
- * We need to make sure that these merged watermark values are
- * actually a valid configuration themselves. If they're not,
- * there's no safe way to transition from the old state to
- * the new state, so we need to fail the atomic transaction.
- */
- if (!ilk_validate_pipe_wm(dev_priv, a))
- return -EINVAL;
-
- /*
-	 * If our intermediate WM is identical to the final WM, then we can
- * omit the post-vblank programming; only update if it's different.
- */
- if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0)
- new_crtc_state->wm.need_postvbl_update = true;
-
- return 0;
-}
-
-/*
- * Merge the watermarks from all active pipes for a specific level.
- */
-static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
- int level,
- struct intel_wm_level *ret_wm)
-{
- const struct intel_crtc *crtc;
-
- ret_wm->enable = true;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
- const struct intel_wm_level *wm = &active->wm[level];
-
- if (!active->pipe_enabled)
- continue;
-
- /*
- * The watermark values may have been used in the past,
- * so we must maintain them in the registers for some
- * time even if the level is now disabled.
- */
- if (!wm->enable)
- ret_wm->enable = false;
-
- ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
- ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
- ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
- ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
- }
-}
-
-/*
- * Merge all low power watermarks for all active pipes.
- */
-static void ilk_wm_merge(struct drm_i915_private *dev_priv,
- const struct intel_wm_config *config,
- const struct ilk_wm_maximums *max,
- struct intel_pipe_wm *merged)
-{
- int level, max_level = ilk_wm_max_level(dev_priv);
- int last_enabled_level = max_level;
-
- /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
- if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
- config->num_pipes_active > 1)
- last_enabled_level = 0;
-
- /* ILK: FBC WM must be disabled always */
- merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;
-
- /* merge each WM1+ level */
- for (level = 1; level <= max_level; level++) {
- struct intel_wm_level *wm = &merged->wm[level];
-
- ilk_merge_wm_level(dev_priv, level, wm);
-
- if (level > last_enabled_level)
- wm->enable = false;
- else if (!ilk_validate_wm_level(level, max, wm))
- /* make sure all following levels get disabled */
- last_enabled_level = level - 1;
-
- /*
- * The spec says it is preferred to disable
- * FBC WMs instead of disabling a WM level.
- */
- if (wm->fbc_val > max->fbc) {
- if (wm->enable)
- merged->fbc_wm_enabled = false;
- wm->fbc_val = 0;
- }
- }
-
- /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
- if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
- dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) {
- for (level = 2; level <= max_level; level++) {
- struct intel_wm_level *wm = &merged->wm[level];
-
- wm->enable = false;
- }
- }
-}
-
-static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
-{
- /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
- return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
-}
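As a standalone sketch of the mapping above (the helper name and the bool parameter standing in for pipe_wm->wm[4].enable are hypothetical):

	static int lp_to_level(int wm_lp, bool wm4_usable)
	{
		/* LP1 always maps to level 1; LP2/LP3 shift up by one when WM4 is usable */
		return wm_lp + (wm_lp >= 2 && wm4_usable);
	}

so LP1/LP2/LP3 select levels 1/2/3 normally, and 1/3/4 when watermark level 4 is in use.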
-
-/* The value we need to program into the WM_LPx latency field */
-static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
- int level)
-{
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- return 2 * level;
- else
- return dev_priv->display.wm.pri_latency[level];
-}
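For instance, on HSW/BDW the WM_LP latency field for LP level 3 is simply 2 * 3 = 6, whereas earlier platforms program the pri_latency[3] value that was read out during init.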
-
-static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
- const struct intel_pipe_wm *merged,
- enum intel_ddb_partitioning partitioning,
- struct ilk_wm_values *results)
-{
- struct intel_crtc *crtc;
- int level, wm_lp;
-
- results->enable_fbc_wm = merged->fbc_wm_enabled;
- results->partitioning = partitioning;
-
- /* LP1+ register values */
- for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
- const struct intel_wm_level *r;
-
- level = ilk_wm_lp_to_level(wm_lp, merged);
-
- r = &merged->wm[level];
-
- /*
- * Maintain the watermark values even if the level is
- * disabled. Doing otherwise could cause underruns.
- */
- results->wm_lp[wm_lp - 1] =
- WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) |
- WM_LP_PRIMARY(r->pri_val) |
- WM_LP_CURSOR(r->cur_val);
-
- if (r->enable)
- results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE;
-
- if (DISPLAY_VER(dev_priv) >= 8)
- results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val);
- else
- results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val);
-
- results->wm_lp_spr[wm_lp - 1] = WM_LP_SPRITE(r->spr_val);
-
- /*
- * Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
- * level is disabled. Doing otherwise could cause underruns.
- */
- if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) {
- drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
- results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
- }
- }
-
- /* LP0 register values */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- enum pipe pipe = crtc->pipe;
- const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
- const struct intel_wm_level *r = &pipe_wm->wm[0];
-
- if (drm_WARN_ON(&dev_priv->drm, !r->enable))
- continue;
-
- results->wm_pipe[pipe] =
- WM0_PIPE_PRIMARY(r->pri_val) |
- WM0_PIPE_SPRITE(r->spr_val) |
- WM0_PIPE_CURSOR(r->cur_val);
- }
-}
-
-/* Find the result with the highest level enabled. Check for enable_fbc_wm in
- * case both are at the same level. Prefer r1 in case they're the same. */
-static struct intel_pipe_wm *
-ilk_find_best_result(struct drm_i915_private *dev_priv,
- struct intel_pipe_wm *r1,
- struct intel_pipe_wm *r2)
-{
- int level, max_level = ilk_wm_max_level(dev_priv);
- int level1 = 0, level2 = 0;
-
- for (level = 1; level <= max_level; level++) {
- if (r1->wm[level].enable)
- level1 = level;
- if (r2->wm[level].enable)
- level2 = level;
- }
-
- if (level1 == level2) {
- if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
- return r2;
- else
- return r1;
- } else if (level1 > level2) {
- return r1;
- } else {
- return r2;
- }
-}
-
-/* dirty bits used to track which watermarks need changes */
-#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
-#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
-#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
-#define WM_DIRTY_FBC (1 << 24)
-#define WM_DIRTY_DDB (1 << 25)
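For illustration, these bits compose as follows (hypothetical local variable, values derived from the definitions above):

	unsigned int dirty = WM_DIRTY_PIPE(PIPE_A) | WM_DIRTY_LP(2) | WM_DIRTY_FBC;
	/* with PIPE_A == 0: bit 0 (pipe A), bit 17 (LP2) and bit 24 (FBC) are set */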
-
-static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
- const struct ilk_wm_values *old,
- const struct ilk_wm_values *new)
-{
- unsigned int dirty = 0;
- enum pipe pipe;
- int wm_lp;
-
- for_each_pipe(dev_priv, pipe) {
- if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
- dirty |= WM_DIRTY_PIPE(pipe);
- /* Must disable LP1+ watermarks too */
- dirty |= WM_DIRTY_LP_ALL;
- }
- }
-
- if (old->enable_fbc_wm != new->enable_fbc_wm) {
- dirty |= WM_DIRTY_FBC;
- /* Must disable LP1+ watermarks too */
- dirty |= WM_DIRTY_LP_ALL;
- }
-
- if (old->partitioning != new->partitioning) {
- dirty |= WM_DIRTY_DDB;
- /* Must disable LP1+ watermarks too */
- dirty |= WM_DIRTY_LP_ALL;
- }
-
- /* LP1+ watermarks already deemed dirty, no need to continue */
- if (dirty & WM_DIRTY_LP_ALL)
- return dirty;
-
- /* Find the lowest numbered LP1+ watermark in need of an update... */
- for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
- if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
- old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
- break;
- }
-
- /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
- for (; wm_lp <= 3; wm_lp++)
- dirty |= WM_DIRTY_LP(wm_lp);
-
- return dirty;
-}
-
-static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
- unsigned int dirty)
-{
- struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
- bool changed = false;
-
- if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
- previous->wm_lp[2] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
- changed = true;
- }
- if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
- previous->wm_lp[1] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
- changed = true;
- }
- if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
- previous->wm_lp[0] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
- changed = true;
- }
-
- /*
- * Don't touch WM_LP_SPRITE_ENABLE here.
- * Doing so could cause underruns.
- */
-
- return changed;
-}
-
-/*
- * The spec says we shouldn't write when we don't need to, because every write
- * causes WMs to be re-evaluated, expending some power.
- */
-static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
- struct ilk_wm_values *results)
-{
- struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
- unsigned int dirty;
-
- dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
- if (!dirty)
- return;
-
- _ilk_disable_lp_wm(dev_priv, dirty);
-
- if (dirty & WM_DIRTY_PIPE(PIPE_A))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
- if (dirty & WM_DIRTY_PIPE(PIPE_B))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
- if (dirty & WM_DIRTY_PIPE(PIPE_C))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
-
- if (dirty & WM_DIRTY_DDB) {
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
- results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
- WM_MISC_DATA_PARTITION_5_6);
- else
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
- results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
- DISP_DATA_PARTITION_5_6);
- }
-
- if (dirty & WM_DIRTY_FBC)
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS,
- results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
-
- if (dirty & WM_DIRTY_LP(1) &&
- previous->wm_lp_spr[0] != results->wm_lp_spr[0])
- intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
-
- if (DISPLAY_VER(dev_priv) >= 7) {
- if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
- intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
- if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
- intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
- }
-
- if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
- intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
- if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
- intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
- if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
- intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
-
- dev_priv->display.wm.hw = *results;
-}
-
-bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
-{
- return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
-}
-
-static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
- struct intel_wm_config *config)
-{
- struct intel_crtc *crtc;
-
- /* Compute the currently _active_ config */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
-
- if (!wm->pipe_enabled)
- continue;
-
- config->sprites_enabled |= wm->sprites_enabled;
- config->sprites_scaled |= wm->sprites_scaled;
- config->num_pipes_active++;
- }
-}
-
-static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
-{
- struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
- struct ilk_wm_maximums max;
- struct intel_wm_config config = {};
- struct ilk_wm_values results = {};
- enum intel_ddb_partitioning partitioning;
-
- ilk_compute_wm_config(dev_priv, &config);
-
- ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
- ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
-
- /* 5/6 split only in single pipe config on IVB+ */
- if (DISPLAY_VER(dev_priv) >= 7 &&
- config.num_pipes_active == 1 && config.sprites_enabled) {
- ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
- ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
-
- best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
- } else {
- best_lp_wm = &lp_wm_1_2;
- }
-
- partitioning = (best_lp_wm == &lp_wm_1_2) ?
- INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
-
- ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
-
- ilk_write_wm_values(dev_priv, &results);
-}
-
-static void ilk_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
- ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void ilk_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- if (!crtc_state->wm.need_postvbl_update)
- return;
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
- ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
- struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
- struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
- enum pipe pipe = crtc->pipe;
-
- hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
-
- memset(active, 0, sizeof(*active));
-
- active->pipe_enabled = crtc->active;
-
- if (active->pipe_enabled) {
- u32 tmp = hw->wm_pipe[pipe];
-
- /*
- * For active pipes LP0 watermark is marked as
-		 * enabled, and LP1+ watermarks as disabled since
- * we can't really reverse compute them in case
- * multiple pipes are active.
- */
- active->wm[0].enable = true;
- active->wm[0].pri_val = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, tmp);
- active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp);
- active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp);
- } else {
- int level, max_level = ilk_wm_max_level(dev_priv);
-
- /*
- * For inactive pipes, all watermark levels
- * should be marked as enabled but zeroed,
- * which is what we'd compute them to.
- */
- for (level = 0; level <= max_level; level++)
- active->wm[level].enable = true;
- }
-
- crtc->wm.active.ilk = *active;
-}
-
-#define _FW_WM(value, plane) \
- (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
-#define _FW_WM_VLV(value, plane) \
- (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
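These token-pasting helpers are plain mask-and-shift field extractions; for example (expansion shown for illustration, register names taken from the existing DSPFW definitions):

	/* _FW_WM(tmp, SR) expands to: */
	((tmp) & DSPFW_SR_MASK) >> DSPFW_SR_SHIFT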
-
-static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
- struct g4x_wm_values *wm)
-{
- u32 tmp;
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
- wm->sr.plane = _FW_WM(tmp, SR);
- wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
- wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
- wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
- wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
- wm->sr.fbc = _FW_WM(tmp, FBC_SR);
- wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
- wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
- wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
- wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
- wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
- wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
- wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
- wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
-}
-
-static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
- struct vlv_wm_values *wm)
-{
- enum pipe pipe;
- u32 tmp;
-
- for_each_pipe(dev_priv, pipe) {
- tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
-
- wm->ddl[pipe].plane[PLANE_PRIMARY] =
- (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
- wm->ddl[pipe].plane[PLANE_CURSOR] =
- (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
- wm->ddl[pipe].plane[PLANE_SPRITE0] =
- (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
- wm->ddl[pipe].plane[PLANE_SPRITE1] =
- (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
- }
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
- wm->sr.plane = _FW_WM(tmp, SR);
- wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
- wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
- wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
- wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
- wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
- wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
- wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
-
- if (IS_CHERRYVIEW(dev_priv)) {
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
- wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
- wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
- wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
- wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
- wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
- wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
- wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
- wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
- wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
- wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
- wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
- wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
- wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
- wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
- wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
- wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
- } else {
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
- wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
- wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
- wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
- wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
- wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
- wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
- wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
- wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
- wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
- }
-}
-
-#undef _FW_WM
-#undef _FW_WM_VLV
-
-void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
-{
- struct g4x_wm_values *wm = &dev_priv->display.wm.g4x;
- struct intel_crtc *crtc;
-
- g4x_read_wm_values(dev_priv, wm);
-
- wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct g4x_wm_state *active = &crtc->wm.active.g4x;
- struct g4x_pipe_wm *raw;
- enum pipe pipe = crtc->pipe;
- enum plane_id plane_id;
- int level, max_level;
-
- active->cxsr = wm->cxsr;
- active->hpll_en = wm->hpll_en;
- active->fbc_en = wm->fbc_en;
-
- active->sr = wm->sr;
- active->hpll = wm->hpll;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- active->wm.plane[plane_id] =
- wm->pipe[pipe].plane[plane_id];
- }
-
- if (wm->cxsr && wm->hpll_en)
- max_level = G4X_WM_LEVEL_HPLL;
- else if (wm->cxsr)
- max_level = G4X_WM_LEVEL_SR;
- else
- max_level = G4X_WM_LEVEL_NORMAL;
-
- level = G4X_WM_LEVEL_NORMAL;
- raw = &crtc_state->wm.g4x.raw[level];
- for_each_plane_id_on_crtc(crtc, plane_id)
- raw->plane[plane_id] = active->wm.plane[plane_id];
-
- level = G4X_WM_LEVEL_SR;
- if (level > max_level)
- goto out;
-
- raw = &crtc_state->wm.g4x.raw[level];
- raw->plane[PLANE_PRIMARY] = active->sr.plane;
- raw->plane[PLANE_CURSOR] = active->sr.cursor;
- raw->plane[PLANE_SPRITE0] = 0;
- raw->fbc = active->sr.fbc;
-
- level = G4X_WM_LEVEL_HPLL;
- if (level > max_level)
- goto out;
-
- raw = &crtc_state->wm.g4x.raw[level];
- raw->plane[PLANE_PRIMARY] = active->hpll.plane;
- raw->plane[PLANE_CURSOR] = active->hpll.cursor;
- raw->plane[PLANE_SPRITE0] = 0;
- raw->fbc = active->hpll.fbc;
-
- level++;
- out:
- for_each_plane_id_on_crtc(crtc, plane_id)
- g4x_raw_plane_wm_set(crtc_state, level,
- plane_id, USHRT_MAX);
- g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
-
- g4x_invalidate_wms(crtc, active, level);
-
- crtc_state->wm.g4x.optimal = *active;
- crtc_state->wm.g4x.intermediate = *active;
-
- drm_dbg_kms(&dev_priv->drm,
- "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
- pipe_name(pipe),
- wm->pipe[pipe].plane[PLANE_PRIMARY],
- wm->pipe[pipe].plane[PLANE_CURSOR],
- wm->pipe[pipe].plane[PLANE_SPRITE0]);
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
- wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
- drm_dbg_kms(&dev_priv->drm,
- "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
- wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
- drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
- str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en),
- str_yes_no(wm->fbc_en));
-}
-
-void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
-{
- struct intel_plane *plane;
- struct intel_crtc *crtc;
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
-
- for_each_intel_plane(&dev_priv->drm, plane) {
- struct intel_crtc *crtc =
- intel_crtc_for_pipe(dev_priv, plane->pipe);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- enum plane_id plane_id = plane->id;
- int level, num_levels = intel_wm_num_levels(dev_priv);
-
- if (plane_state->uapi.visible)
- continue;
-
- for (level = 0; level < num_levels; level++) {
- struct g4x_pipe_wm *raw =
- &crtc_state->wm.g4x.raw[level];
-
- raw->plane[plane_id] = 0;
-
- if (plane_id == PLANE_PRIMARY)
- raw->fbc = 0;
- }
- }
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- int ret;
-
- ret = _g4x_compute_pipe_wm(crtc_state);
- drm_WARN_ON(&dev_priv->drm, ret);
-
- crtc_state->wm.g4x.intermediate =
- crtc_state->wm.g4x.optimal;
- crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
- }
-
- g4x_program_watermarks(dev_priv);
-
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
-{
- struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
- struct intel_crtc *crtc;
- u32 val;
-
- vlv_read_wm_values(dev_priv, wm);
-
- wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
- wm->level = VLV_WM_LEVEL_PM2;
-
- if (IS_CHERRYVIEW(dev_priv)) {
- vlv_punit_get(dev_priv);
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
- if (val & DSP_MAXFIFO_PM5_ENABLE)
- wm->level = VLV_WM_LEVEL_PM5;
-
- /*
- * If DDR DVFS is disabled in the BIOS, Punit
- * will never ack the request. So if that happens
- * assume we don't have to enable/disable DDR DVFS
- * dynamically. To test that just set the REQ_ACK
- * bit to poke the Punit, but don't change the
- * HIGH/LOW bits so that we don't actually change
- * the current state.
- */
- val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
- val |= FORCE_DDR_FREQ_REQ_ACK;
- vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
-
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
- FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
- drm_dbg_kms(&dev_priv->drm,
- "Punit not acking DDR DVFS request, "
- "assuming DDR DVFS is disabled\n");
- dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM5;
- } else {
- val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
- if ((val & FORCE_DDR_HIGH_FREQ) == 0)
- wm->level = VLV_WM_LEVEL_DDR_DVFS;
- }
-
- vlv_punit_put(dev_priv);
- }
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct vlv_wm_state *active = &crtc->wm.active.vlv;
- const struct vlv_fifo_state *fifo_state =
- &crtc_state->wm.vlv.fifo_state;
- enum pipe pipe = crtc->pipe;
- enum plane_id plane_id;
- int level;
-
- vlv_get_fifo_size(crtc_state);
-
- active->num_levels = wm->level + 1;
- active->cxsr = wm->cxsr;
-
- for (level = 0; level < active->num_levels; level++) {
- struct g4x_pipe_wm *raw =
- &crtc_state->wm.vlv.raw[level];
-
- active->sr[level].plane = wm->sr.plane;
- active->sr[level].cursor = wm->sr.cursor;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- active->wm[level].plane[plane_id] =
- wm->pipe[pipe].plane[plane_id];
-
- raw->plane[plane_id] =
- vlv_invert_wm_value(active->wm[level].plane[plane_id],
- fifo_state->plane[plane_id]);
- }
- }
-
- for_each_plane_id_on_crtc(crtc, plane_id)
- vlv_raw_plane_wm_set(crtc_state, level,
- plane_id, USHRT_MAX);
- vlv_invalidate_wms(crtc, active, level);
-
- crtc_state->wm.vlv.optimal = *active;
- crtc_state->wm.vlv.intermediate = *active;
-
- drm_dbg_kms(&dev_priv->drm,
- "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
- pipe_name(pipe),
- wm->pipe[pipe].plane[PLANE_PRIMARY],
- wm->pipe[pipe].plane[PLANE_CURSOR],
- wm->pipe[pipe].plane[PLANE_SPRITE0],
- wm->pipe[pipe].plane[PLANE_SPRITE1]);
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
- wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
-}
-
-void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
-{
- struct intel_plane *plane;
- struct intel_crtc *crtc;
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
-
- for_each_intel_plane(&dev_priv->drm, plane) {
- struct intel_crtc *crtc =
- intel_crtc_for_pipe(dev_priv, plane->pipe);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- enum plane_id plane_id = plane->id;
- int level, num_levels = intel_wm_num_levels(dev_priv);
-
- if (plane_state->uapi.visible)
- continue;
-
- for (level = 0; level < num_levels; level++) {
- struct g4x_pipe_wm *raw =
- &crtc_state->wm.vlv.raw[level];
-
- raw->plane[plane_id] = 0;
- }
- }
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- int ret;
-
- ret = _vlv_compute_pipe_wm(crtc_state);
- drm_WARN_ON(&dev_priv->drm, ret);
-
- crtc_state->wm.vlv.intermediate =
- crtc_state->wm.vlv.optimal;
- crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
- }
-
- vlv_program_watermarks(dev_priv);
-
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-/*
- * FIXME should probably kill this and improve
- * the real watermark readout/sanitation instead
- */
-static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
-{
- intel_uncore_rmw(&dev_priv->uncore, WM3_LP_ILK, WM_LP_ENABLE, 0);
- intel_uncore_rmw(&dev_priv->uncore, WM2_LP_ILK, WM_LP_ENABLE, 0);
- intel_uncore_rmw(&dev_priv->uncore, WM1_LP_ILK, WM_LP_ENABLE, 0);
-
- /*
- * Don't touch WM_LP_SPRITE_ENABLE here.
- * Doing so could cause underruns.
- */
-}
-
-void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
-{
- struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
- struct intel_crtc *crtc;
-
- ilk_init_lp_watermarks(dev_priv);
-
- for_each_intel_crtc(&dev_priv->drm, crtc)
- ilk_pipe_wm_get_hw_state(crtc);
-
- hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
- hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
- hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
-
- hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
- if (DISPLAY_VER(dev_priv) >= 7) {
- hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
- hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
- }
-
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
- INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
- else if (IS_IVYBRIDGE(dev_priv))
- hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
- INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
-
- hw->enable_fbc_wm =
- !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
-}
-
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
/*
@@ -4282,16 +320,6 @@ static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
0, TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
-static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
-{
- if (HAS_PCH_LPT_LP(dev_priv)) {
- u32 val = intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D);
-
- val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
- intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, val);
- }
-}
-
static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
int general_prio_credits,
int high_prio_credits)
@@ -4336,10 +364,6 @@ static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
DPFC_CHICKEN_COMP_DUMMY_PIXEL);
- /* Wa_1409825376:tgl (pre-prod)*/
- if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
- intel_uncore_rmw(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, 0, TGL_VRH_GATING_DIS);
-
/* Wa_14013723622:tgl,rkl,dg1,adl-s */
if (DISPLAY_VER(dev_priv) == 12)
intel_uncore_rmw(&dev_priv->uncore, CLKREQ_POLICY,
@@ -4357,15 +381,6 @@ static void adlp_init_clock_gating(struct drm_i915_private *dev_priv)
intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0);
}
-static void dg1_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- gen12lp_init_clock_gating(dev_priv);
-
- /* Wa_1409836686:dg1[a0] */
- if (IS_DG1_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0))
- intel_uncore_rmw(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, 0, DPT_GATING_DIS);
-}
-
static void xehpsdv_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* Wa_22010146351:xehpsdv */
@@ -4764,12 +779,6 @@ void intel_init_clock_gating(struct drm_i915_private *dev_priv)
dev_priv->clock_gating_funcs->init_clock_gating(dev_priv);
}
-void intel_suspend_hw(struct drm_i915_private *dev_priv)
-{
- if (HAS_PCH_LPT(dev_priv))
- lpt_suspend_hw(dev_priv);
-}
-
static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
drm_dbg_kms(&dev_priv->drm,
@@ -4785,7 +794,6 @@ CG_FUNCS(pvc);
CG_FUNCS(dg2);
CG_FUNCS(xehpsdv);
CG_FUNCS(adlp);
-CG_FUNCS(dg1);
CG_FUNCS(gen12lp);
CG_FUNCS(icl);
CG_FUNCS(cfl);
@@ -4820,7 +828,9 @@ CG_FUNCS(nop);
*/
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_PONTEVECCHIO(dev_priv))
+ if (IS_METEORLAKE(dev_priv))
+ dev_priv->clock_gating_funcs = &nop_clock_gating_funcs;
+ else if (IS_PONTEVECCHIO(dev_priv))
dev_priv->clock_gating_funcs = &pvc_clock_gating_funcs;
else if (IS_DG2(dev_priv))
dev_priv->clock_gating_funcs = &dg2_clock_gating_funcs;
@@ -4828,8 +838,6 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->clock_gating_funcs = &xehpsdv_clock_gating_funcs;
else if (IS_ALDERLAKE_P(dev_priv))
dev_priv->clock_gating_funcs = &adlp_clock_gating_funcs;
- else if (IS_DG1(dev_priv))
- dev_priv->clock_gating_funcs = &dg1_clock_gating_funcs;
else if (GRAPHICS_VER(dev_priv) == 12)
dev_priv->clock_gating_funcs = &gen12lp_clock_gating_funcs;
else if (GRAPHICS_VER(dev_priv) == 11)
@@ -4875,117 +883,3 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->clock_gating_funcs = &nop_clock_gating_funcs;
}
}
-
-static const struct intel_wm_funcs ilk_wm_funcs = {
- .compute_pipe_wm = ilk_compute_pipe_wm,
- .compute_intermediate_wm = ilk_compute_intermediate_wm,
- .initial_watermarks = ilk_initial_watermarks,
- .optimize_watermarks = ilk_optimize_watermarks,
-};
-
-static const struct intel_wm_funcs vlv_wm_funcs = {
- .compute_pipe_wm = vlv_compute_pipe_wm,
- .compute_intermediate_wm = vlv_compute_intermediate_wm,
- .initial_watermarks = vlv_initial_watermarks,
- .optimize_watermarks = vlv_optimize_watermarks,
- .atomic_update_watermarks = vlv_atomic_update_fifo,
-};
-
-static const struct intel_wm_funcs g4x_wm_funcs = {
- .compute_pipe_wm = g4x_compute_pipe_wm,
- .compute_intermediate_wm = g4x_compute_intermediate_wm,
- .initial_watermarks = g4x_initial_watermarks,
- .optimize_watermarks = g4x_optimize_watermarks,
-};
-
-static const struct intel_wm_funcs pnv_wm_funcs = {
- .update_wm = pnv_update_wm,
-};
-
-static const struct intel_wm_funcs i965_wm_funcs = {
- .update_wm = i965_update_wm,
-};
-
-static const struct intel_wm_funcs i9xx_wm_funcs = {
- .update_wm = i9xx_update_wm,
-};
-
-static const struct intel_wm_funcs i845_wm_funcs = {
- .update_wm = i845_update_wm,
-};
-
-static const struct intel_wm_funcs nop_funcs = {
-};
-
-/* Set up chip specific power management-related functions */
-void intel_init_pm(struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 9) {
- skl_wm_init(dev_priv);
- return;
- }
-
- /* For cxsr */
- if (IS_PINEVIEW(dev_priv))
- pnv_get_mem_freq(dev_priv);
- else if (GRAPHICS_VER(dev_priv) == 5)
- ilk_get_mem_freq(dev_priv);
-
- /* For FIFO watermark updates */
- if (HAS_PCH_SPLIT(dev_priv)) {
- ilk_setup_wm_latency(dev_priv);
-
- if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->display.wm.pri_latency[1] &&
- dev_priv->display.wm.spr_latency[1] && dev_priv->display.wm.cur_latency[1]) ||
- (DISPLAY_VER(dev_priv) != 5 && dev_priv->display.wm.pri_latency[0] &&
- dev_priv->display.wm.spr_latency[0] && dev_priv->display.wm.cur_latency[0])) {
- dev_priv->display.funcs.wm = &ilk_wm_funcs;
- } else {
- drm_dbg_kms(&dev_priv->drm,
- "Failed to read display plane latency. "
- "Disable CxSR\n");
- dev_priv->display.funcs.wm = &nop_funcs;
- }
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &vlv_wm_funcs;
- } else if (IS_G4X(dev_priv)) {
- g4x_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &g4x_wm_funcs;
- } else if (IS_PINEVIEW(dev_priv)) {
- if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
- dev_priv->is_ddr3,
- dev_priv->fsb_freq,
- dev_priv->mem_freq)) {
- drm_info(&dev_priv->drm,
- "failed to find known CxSR latency "
- "(found ddr%s fsb freq %d, mem freq %d), "
- "disabling CxSR\n",
- (dev_priv->is_ddr3 == 1) ? "3" : "2",
- dev_priv->fsb_freq, dev_priv->mem_freq);
- /* Disable CxSR and never update its watermark again */
- intel_set_memory_cxsr(dev_priv, false);
- dev_priv->display.funcs.wm = &nop_funcs;
- } else
- dev_priv->display.funcs.wm = &pnv_wm_funcs;
- } else if (DISPLAY_VER(dev_priv) == 4) {
- dev_priv->display.funcs.wm = &i965_wm_funcs;
- } else if (DISPLAY_VER(dev_priv) == 3) {
- dev_priv->display.funcs.wm = &i9xx_wm_funcs;
- } else if (DISPLAY_VER(dev_priv) == 2) {
- if (INTEL_NUM_PIPES(dev_priv) == 1)
- dev_priv->display.funcs.wm = &i845_wm_funcs;
- else
- dev_priv->display.funcs.wm = &i9xx_wm_funcs;
- } else {
- drm_err(&dev_priv->drm,
- "unexpected fall-through in %s\n", __func__);
- dev_priv->display.funcs.wm = &nop_funcs;
- }
-}
-
-void intel_pm_setup(struct drm_i915_private *dev_priv)
-{
- dev_priv->runtime_pm.suspended = false;
- atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
-}
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index c09b872d65c8..f774bddcdca6 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -13,22 +13,6 @@ struct intel_crtc_state;
struct intel_plane_state;
void intel_init_clock_gating(struct drm_i915_private *dev_priv);
-void intel_suspend_hw(struct drm_i915_private *dev_priv);
-int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
-void intel_init_pm(struct drm_i915_private *dev_priv);
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
-void intel_pm_setup(struct drm_i915_private *dev_priv);
-void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
-void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
-bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv);
-bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-void intel_print_wm_latency(struct drm_i915_private *dev_priv,
- const char *name, const u16 wm[]);
-
-bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);
#endif /* __INTEL_PM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 129746713d07..cf5122299b6b 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -652,6 +652,8 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
rpm->kdev = kdev;
rpm->available = HAS_RUNTIME_PM(i915);
+ rpm->suspended = false;
+ atomic_set(&rpm->wakeref_count, 0);
init_intel_runtime_pm_wakeref(rpm);
INIT_LIST_HEAD(&rpm->lmem_userfault_list);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8dee9e62a73e..e1e1f34490c8 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -32,7 +32,6 @@
#include "i915_reg.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "intel_pm.h"
#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS 10
@@ -2460,7 +2459,7 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
static void uncore_unmap_mmio(struct drm_device *drm, void *regs)
{
- iounmap(regs);
+ iounmap((void __iomem *)regs);
}
int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
@@ -2491,7 +2490,8 @@ int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
return -EIO;
}
- return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio, uncore->regs);
+ return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio,
+ (void __force *)uncore->regs);
}
void intel_uncore_init_early(struct intel_uncore *uncore,
@@ -2748,14 +2748,25 @@ static void driver_initiated_flr(struct intel_uncore *uncore)
/* Trigger the actual Driver-FLR */
intel_uncore_rmw_fw(uncore, GU_CNTL, 0, DRIVERFLR);
+ /* Wait for hardware teardown to complete */
+ ret = intel_wait_for_register_fw(uncore, GU_CNTL,
+ DRIVERFLR, 0,
+ flr_timeout_ms);
+ if (ret) {
+ drm_err(&i915->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
+ return;
+ }
+
+ /* Wait for hardware/firmware re-init to complete */
ret = intel_wait_for_register_fw(uncore, GU_DEBUG,
DRIVERFLR_STATUS, DRIVERFLR_STATUS,
flr_timeout_ms);
if (ret) {
- drm_err(&i915->drm, "wait for Driver-FLR completion failed! %d\n", ret);
+ drm_err(&i915->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
return;
}
+ /* Clear sticky completion status */
intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);
}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index cfc9af8b3d21..9d4c7724e98e 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -270,6 +270,60 @@ static bool pxp_component_bound(struct intel_pxp *pxp)
return bound;
}
+static int __pxp_global_teardown_final(struct intel_pxp *pxp)
+{
+ if (!pxp->arb_is_valid)
+ return 0;
+ /*
+ * To ensure synchronous and coherent session teardown completion
+ * in response to suspend or shutdown triggers, don't use a worker.
+ */
+ intel_pxp_mark_termination_in_progress(pxp);
+ intel_pxp_terminate(pxp, false);
+
+ if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(250)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int __pxp_global_teardown_restart(struct intel_pxp *pxp)
+{
+ if (pxp->arb_is_valid)
+ return 0;
+ /*
+ * The arb-session is currently inactive and we are doing a reset and restart
+ * due to a runtime event. Use the worker that was designed for this.
+ */
+ pxp_queue_termination(pxp);
+
+ if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(250)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+void intel_pxp_end(struct intel_pxp *pxp)
+{
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
+ intel_wakeref_t wakeref;
+
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ mutex_lock(&pxp->arb_mutex);
+
+ if (__pxp_global_teardown_final(pxp))
+ drm_dbg(&i915->drm, "PXP end timed out\n");
+
+ mutex_unlock(&pxp->arb_mutex);
+
+ intel_pxp_fini_hw(pxp);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+}
+
/*
* the arb session is restarted from the irq work when we receive the
* termination completion interrupt
@@ -286,16 +340,9 @@ int intel_pxp_start(struct intel_pxp *pxp)
mutex_lock(&pxp->arb_mutex);
- if (pxp->arb_is_valid)
- goto unlock;
-
- pxp_queue_termination(pxp);
-
- if (!wait_for_completion_timeout(&pxp->termination,
- msecs_to_jiffies(250))) {
- ret = -ETIMEDOUT;
+ ret = __pxp_global_teardown_restart(pxp);
+ if (ret)
goto unlock;
- }
/* make sure the compiler doesn't optimize the double access */
barrier();
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h
index 04440fada711..3ded0890cd27 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h
@@ -24,8 +24,10 @@ void intel_pxp_init_hw(struct intel_pxp *pxp);
void intel_pxp_fini_hw(struct intel_pxp *pxp);
void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp);
+void intel_pxp_tee_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id);
int intel_pxp_start(struct intel_pxp *pxp);
+void intel_pxp_end(struct intel_pxp *pxp);
int intel_pxp_key_check(struct intel_pxp *pxp,
struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h
index 739f9072fa5f..26f7d9f01bf3 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h
@@ -12,6 +12,9 @@
/* PXP-Opcode for Init Session */
#define PXP42_CMDID_INIT_SESSION 0x1e
+/* PXP-Opcode for Invalidate Stream Key */
+#define PXP42_CMDID_INVALIDATE_STREAM_KEY 0x00000007
+
/* PXP-Input-Packet: Init Session (Arb-Session) */
struct pxp42_create_arb_in {
struct pxp_cmd_header header;
@@ -25,4 +28,16 @@ struct pxp42_create_arb_out {
struct pxp_cmd_header header;
} __packed;
+/* PXP-Input-Packet: Invalidate Stream Key */
+struct pxp42_inv_stream_key_in {
+ struct pxp_cmd_header header;
+ u32 rsvd[3];
+} __packed;
+
+/* PXP-Output-Packet: Invalidate Stream Key */
+struct pxp42_inv_stream_key_out {
+ struct pxp_cmd_header header;
+ u32 rsvd;
+} __packed;
+
#endif /* __INTEL_PXP_FW_INTERFACE_42_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h
index aaa8187a0afb..ae9b151b7cb7 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h
@@ -28,6 +28,9 @@ struct pxp_cmd_header {
union {
u32 status; /* out */
u32 stream_id; /* in */
+#define PXP_CMDHDR_EXTDATA_SESSION_VALID GENMASK(0, 0)
+#define PXP_CMDHDR_EXTDATA_APP_TYPE GENMASK(1, 1)
+#define PXP_CMDHDR_EXTDATA_SESSION_ID GENMASK(17, 2)
};
/* Length of the message (excluding the header) */
u32 buffer_len;
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c b/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c
index 64609d1b1c0f..23431c36b60b 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c
@@ -38,7 +38,7 @@ int intel_pxp_huc_load_and_auth(struct intel_pxp *pxp)
huc_in.header.command_id = PXP43_CMDID_START_HUC_AUTH;
huc_in.header.status = 0;
huc_in.header.buffer_len = sizeof(huc_in.huc_base_address);
- huc_in.huc_base_address = huc_phys_addr;
+ huc_in.huc_base_address = cpu_to_le64(huc_phys_addr);
err = intel_pxp_tee_stream_message(pxp, client_id, fence_id,
&huc_in, sizeof(huc_in),
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
index 892d39cc61c1..4f836b317424 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
@@ -16,7 +16,7 @@ void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
if (!intel_pxp_is_enabled(pxp))
return;
- pxp->arb_is_valid = false;
+ intel_pxp_end(pxp);
intel_pxp_invalidate(pxp);
}
@@ -34,7 +34,7 @@ void intel_pxp_suspend(struct intel_pxp *pxp)
}
}
-void intel_pxp_resume(struct intel_pxp *pxp)
+void intel_pxp_resume_complete(struct intel_pxp *pxp)
{
if (!intel_pxp_is_enabled(pxp))
return;
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
index 586be769104f..06b46f535b42 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
@@ -11,7 +11,7 @@ struct intel_pxp;
#ifdef CONFIG_DRM_I915_PXP
void intel_pxp_suspend_prepare(struct intel_pxp *pxp);
void intel_pxp_suspend(struct intel_pxp *pxp);
-void intel_pxp_resume(struct intel_pxp *pxp);
+void intel_pxp_resume_complete(struct intel_pxp *pxp);
void intel_pxp_runtime_suspend(struct intel_pxp *pxp);
#else
static inline void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
@@ -22,7 +22,7 @@ static inline void intel_pxp_suspend(struct intel_pxp *pxp)
{
}
-static inline void intel_pxp_resume(struct intel_pxp *pxp)
+static inline void intel_pxp_resume_complete(struct intel_pxp *pxp)
{
}
@@ -32,6 +32,6 @@ static inline void intel_pxp_runtime_suspend(struct intel_pxp *pxp)
#endif
static inline void intel_pxp_runtime_resume(struct intel_pxp *pxp)
{
- intel_pxp_resume(pxp);
+ intel_pxp_resume_complete(pxp);
}
#endif /* __INTEL_PXP_PM_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
index ae413580b81a..448cacb0465d 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -110,14 +110,16 @@ static int pxp_terminate_arb_session_and_global(struct intel_pxp *pxp)
intel_uncore_write(gt->uncore, PXP_GLOBAL_TERMINATE, 1);
+ intel_pxp_tee_end_arb_fw_session(pxp, ARB_SESSION);
+
return ret;
}
-static void pxp_terminate(struct intel_pxp *pxp)
+void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_restart)
{
int ret;
- pxp->hw_state_invalidated = true;
+ pxp->hw_state_invalidated = post_invalidation_needs_restart;
/*
* if we fail to submit the termination there is no point in waiting for
@@ -165,7 +167,7 @@ static void pxp_session_work(struct work_struct *work)
if (events & PXP_TERMINATION_REQUEST) {
events &= ~PXP_TERMINATION_COMPLETE;
- pxp_terminate(pxp);
+ intel_pxp_terminate(pxp, true);
}
if (events & PXP_TERMINATION_COMPLETE)
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.h b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h
index 903ac52cffa1..ba5788127109 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h
@@ -12,9 +12,14 @@ struct intel_pxp;
#ifdef CONFIG_DRM_I915_PXP
void intel_pxp_session_management_init(struct intel_pxp *pxp);
+void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_restart);
#else
static inline void intel_pxp_session_management_init(struct intel_pxp *pxp)
{
}
+
+static inline void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_restart)
+{
+}
#endif
#endif /* __INTEL_PXP_SESSION_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
index 73aa8015f828..d9d248b48093 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
@@ -127,6 +127,12 @@ static int i915_pxp_tee_component_bind(struct device *i915_kdev,
intel_wakeref_t wakeref;
int ret = 0;
+ if (!HAS_HECI_PXP(i915)) {
+ pxp->dev_link = device_link_add(i915_kdev, tee_kdev, DL_FLAG_STATELESS);
+ if (drm_WARN_ON(&i915->drm, !pxp->dev_link))
+ return -ENODEV;
+ }
+
mutex_lock(&pxp->tee_mutex);
pxp->pxp_component = data;
pxp->pxp_component->tee_dev = tee_kdev;
@@ -169,6 +175,11 @@ static void i915_pxp_tee_component_unbind(struct device *i915_kdev,
mutex_lock(&pxp->tee_mutex);
pxp->pxp_component = NULL;
mutex_unlock(&pxp->tee_mutex);
+
+ if (pxp->dev_link) {
+ device_link_del(pxp->dev_link);
+ pxp->dev_link = NULL;
+ }
}
static const struct component_ops i915_pxp_tee_component_ops = {
@@ -308,3 +319,38 @@ int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp,
return ret;
}
+
+void intel_pxp_tee_end_arb_fw_session(struct intel_pxp *pxp, u32 session_id)
+{
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
+ struct pxp42_inv_stream_key_in msg_in = {0};
+ struct pxp42_inv_stream_key_out msg_out = {0};
+ int ret, trials = 0;
+
+try_again:
+ memset(&msg_in, 0, sizeof(msg_in));
+ memset(&msg_out, 0, sizeof(msg_out));
+ msg_in.header.api_version = PXP_APIVER(4, 2);
+ msg_in.header.command_id = PXP42_CMDID_INVALIDATE_STREAM_KEY;
+ msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);
+
+ msg_in.header.stream_id = FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_VALID, 1);
+ msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_APP_TYPE, 0);
+ msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_ID, session_id);
+
+ ret = intel_pxp_tee_io_message(pxp,
+ &msg_in, sizeof(msg_in),
+ &msg_out, sizeof(msg_out),
+ NULL);
+
+ /* Cleanup coherency between GT and Firmware is critical, so try again if it fails */
+ if ((ret || msg_out.header.status != 0x0) && ++trials < 3)
+ goto try_again;
+
+ if (ret)
+ drm_err(&i915->drm, "Failed to send tee msg for inv-stream-key-%d, ret=[%d]\n",
+ session_id, ret);
+ else if (msg_out.header.status != 0x0)
+ drm_warn(&i915->drm, "PXP firmware failed inv-stream-key-%d with status 0x%08x\n",
+ session_id, msg_out.header.status);
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
index 7dc5f08d1583..007de49e1ea4 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
@@ -32,6 +32,9 @@ struct intel_pxp {
* which are protected by &tee_mutex.
*/
struct i915_pxp_component *pxp_component;
+
+ /* @dev_link: Enforce module relationship for power management ordering. */
+ struct device_link *dev_link;
/**
* @pxp_component_added: track if the pxp component has been added.
* Set and cleared in tee init and fini functions respectively.
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index aaf8a380e5c7..5aee6c9a8295 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -25,6 +25,7 @@ selftest(gt_lrc, intel_lrc_live_selftests)
selftest(gt_mocs, intel_mocs_live_selftests)
selftest(gt_pm, intel_gt_pm_live_selftests)
selftest(gt_heartbeat, intel_heartbeat_live_selftests)
+selftest(gt_tlb, intel_tlb_live_selftests)
selftest(requests, i915_request_live_selftests)
selftest(migrate, intel_migrate_live_selftests)
selftest(active, i915_active_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 6fe22b096bdd..a9b79888c193 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -957,18 +957,18 @@ static int live_cancel_request(void *arg)
return 0;
}
-static struct i915_vma *empty_batch(struct drm_i915_private *i915)
+static struct i915_vma *empty_batch(struct intel_gt *gt)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *cmd;
int err;
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(obj))
return ERR_CAST(obj);
- cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
+ cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto err;
@@ -979,15 +979,15 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- intel_gt_chipset_flush(to_gt(i915));
+ intel_gt_chipset_flush(gt);
- vma = i915_vma_instance(obj, &to_gt(i915)->ggtt->vm, NULL);
+ vma = i915_vma_instance(obj, gt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
}
- err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
goto err;
@@ -1005,6 +1005,14 @@ err:
return ERR_PTR(err);
}
+static int emit_bb_start(struct i915_request *rq, struct i915_vma *batch)
+{
+ return rq->engine->emit_bb_start(rq,
+ i915_vma_offset(batch),
+ i915_vma_size(batch),
+ 0);
+}
+
static struct i915_request *
empty_request(struct intel_engine_cs *engine,
struct i915_vma *batch)
@@ -1016,10 +1024,7 @@ empty_request(struct intel_engine_cs *engine,
if (IS_ERR(request))
return request;
- err = engine->emit_bb_start(request,
- i915_vma_offset(batch),
- i915_vma_size(batch),
- I915_DISPATCH_SECURE);
+ err = emit_bb_start(request, batch);
if (err)
goto out_request;
@@ -1034,8 +1039,7 @@ static int live_empty_request(void *arg)
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
struct igt_live_test t;
- struct i915_vma *batch;
- int err = 0;
+ int err;
/*
* Submit various sized batches of empty requests, to each engine
@@ -1043,16 +1047,17 @@ static int live_empty_request(void *arg)
* the overhead of submitting requests to the hardware.
*/
- batch = empty_batch(i915);
- if (IS_ERR(batch))
- return PTR_ERR(batch);
-
for_each_uabi_engine(engine, i915) {
IGT_TIMEOUT(end_time);
struct i915_request *request;
+ struct i915_vma *batch;
unsigned long n, prime;
ktime_t times[2] = {};
+ batch = empty_batch(engine->gt);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
goto out_batch;
@@ -1100,27 +1105,29 @@ static int live_empty_request(void *arg)
engine->name,
ktime_to_ns(times[0]),
prime, div64_u64(ktime_to_ns(times[1]), prime));
+out_batch:
+ i915_vma_unpin(batch);
+ i915_vma_put(batch);
+ if (err)
+ break;
}
-out_batch:
- i915_vma_unpin(batch);
- i915_vma_put(batch);
return err;
}
-static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
+static struct i915_vma *recursive_batch(struct intel_gt *gt)
{
struct drm_i915_gem_object *obj;
- const int ver = GRAPHICS_VER(i915);
+ const int ver = GRAPHICS_VER(gt->i915);
struct i915_vma *vma;
u32 *cmd;
int err;
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(obj))
return ERR_CAST(obj);
- vma = i915_vma_instance(obj, to_gt(i915)->vm, NULL);
+ vma = i915_vma_instance(obj, gt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -1152,7 +1159,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- intel_gt_chipset_flush(to_gt(i915));
+ intel_gt_chipset_flush(gt);
return vma;
@@ -1186,7 +1193,6 @@ static int live_all_engines(void *arg)
struct intel_engine_cs *engine;
struct i915_request **request;
struct igt_live_test t;
- struct i915_vma *batch;
unsigned int idx;
int err;
@@ -1204,42 +1210,44 @@ static int live_all_engines(void *arg)
if (err)
goto out_free;
- batch = recursive_batch(i915);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
- goto out_free;
- }
-
- i915_vma_lock(batch);
-
idx = 0;
for_each_uabi_engine(engine, i915) {
+ struct i915_vma *batch;
+
+ batch = recursive_batch(engine->gt);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ pr_err("%s: Unable to create batch, err=%d\n",
+ __func__, err);
+ goto out_free;
+ }
+
+ i915_vma_lock(batch);
request[idx] = intel_engine_create_kernel_request(engine);
if (IS_ERR(request[idx])) {
err = PTR_ERR(request[idx]);
pr_err("%s: Request allocation failed with err=%d\n",
__func__, err);
- goto out_request;
+ goto out_unlock;
}
+ GEM_BUG_ON(request[idx]->context->vm != batch->vm);
err = i915_vma_move_to_active(batch, request[idx], 0);
GEM_BUG_ON(err);
- err = engine->emit_bb_start(request[idx],
- i915_vma_offset(batch),
- i915_vma_size(batch),
- 0);
+ err = emit_bb_start(request[idx], batch);
GEM_BUG_ON(err);
request[idx]->batch = batch;
i915_request_get(request[idx]);
i915_request_add(request[idx]);
idx++;
+out_unlock:
+ i915_vma_unlock(batch);
+ if (err)
+ goto out_request;
}
- i915_vma_unlock(batch);
-
idx = 0;
for_each_uabi_engine(engine, i915) {
if (i915_request_completed(request[idx])) {
@@ -1251,17 +1259,23 @@ static int live_all_engines(void *arg)
idx++;
}
- err = recursive_batch_resolve(batch);
- if (err) {
- pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
- goto out_request;
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ err = recursive_batch_resolve(request[idx]->batch);
+ if (err) {
+ pr_err("%s: failed to resolve batch, err=%d\n",
+ __func__, err);
+ goto out_request;
+ }
+ idx++;
}
idx = 0;
for_each_uabi_engine(engine, i915) {
+ struct i915_request *rq = request[idx];
long timeout;
- timeout = i915_request_wait(request[idx], 0,
+ timeout = i915_request_wait(rq, 0,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
err = timeout;
@@ -1270,8 +1284,10 @@ static int live_all_engines(void *arg)
goto out_request;
}
- GEM_BUG_ON(!i915_request_completed(request[idx]));
- i915_request_put(request[idx]);
+ GEM_BUG_ON(!i915_request_completed(rq));
+ i915_vma_unpin(rq->batch);
+ i915_vma_put(rq->batch);
+ i915_request_put(rq);
request[idx] = NULL;
idx++;
}
@@ -1281,12 +1297,18 @@ static int live_all_engines(void *arg)
out_request:
idx = 0;
for_each_uabi_engine(engine, i915) {
- if (request[idx])
- i915_request_put(request[idx]);
+ struct i915_request *rq = request[idx];
+
+ if (!rq)
+ continue;
+
+ if (rq->batch) {
+ i915_vma_unpin(rq->batch);
+ i915_vma_put(rq->batch);
+ }
+ i915_request_put(rq);
idx++;
}
- i915_vma_unpin(batch);
- i915_vma_put(batch);
out_free:
kfree(request);
return err;
@@ -1322,7 +1344,7 @@ static int live_sequential_engines(void *arg)
for_each_uabi_engine(engine, i915) {
struct i915_vma *batch;
- batch = recursive_batch(i915);
+ batch = recursive_batch(engine->gt);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
pr_err("%s: Unable to create batch for %s, err=%d\n",
@@ -1338,6 +1360,7 @@ static int live_sequential_engines(void *arg)
__func__, engine->name, err);
goto out_unlock;
}
+ GEM_BUG_ON(request[idx]->context->vm != batch->vm);
if (prev) {
err = i915_request_await_dma_fence(request[idx],
@@ -1353,10 +1376,7 @@ static int live_sequential_engines(void *arg)
err = i915_vma_move_to_active(batch, request[idx], 0);
GEM_BUG_ON(err);
- err = engine->emit_bb_start(request[idx],
- i915_vma_offset(batch),
- i915_vma_size(batch),
- 0);
+ err = emit_bb_start(request[idx], batch);
GEM_BUG_ON(err);
request[idx]->batch = batch;
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index bba8cb6e8ae4..9f0651d48d41 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -10,6 +10,7 @@
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
+#include "vlv_sideband.h"
struct dram_dimm_info {
u16 size;
@@ -42,6 +43,155 @@ static const char *intel_dram_type_str(enum intel_dram_type type)
#undef DRAM_TYPE_STR
+static void pnv_detect_mem_freq(struct drm_i915_private *dev_priv)
+{
+ u32 tmp;
+
+ tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG);
+
+ switch (tmp & CLKCFG_FSB_MASK) {
+ case CLKCFG_FSB_533:
+ dev_priv->fsb_freq = 533; /* 133*4 */
+ break;
+ case CLKCFG_FSB_800:
+ dev_priv->fsb_freq = 800; /* 200*4 */
+ break;
+ case CLKCFG_FSB_667:
+ dev_priv->fsb_freq = 667; /* 167*4 */
+ break;
+ case CLKCFG_FSB_400:
+ dev_priv->fsb_freq = 400; /* 100*4 */
+ break;
+ }
+
+ switch (tmp & CLKCFG_MEM_MASK) {
+ case CLKCFG_MEM_533:
+ dev_priv->mem_freq = 533;
+ break;
+ case CLKCFG_MEM_667:
+ dev_priv->mem_freq = 667;
+ break;
+ case CLKCFG_MEM_800:
+ dev_priv->mem_freq = 800;
+ break;
+ }
+
+ /* detect pineview DDR3 setting */
+ tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL);
+ dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
+}
+
+static void ilk_detect_mem_freq(struct drm_i915_private *dev_priv)
+{
+ u16 ddrpll, csipll;
+
+ ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
+ switch (ddrpll & 0xff) {
+ case 0xc:
+ dev_priv->mem_freq = 800;
+ break;
+ case 0x10:
+ dev_priv->mem_freq = 1066;
+ break;
+ case 0x14:
+ dev_priv->mem_freq = 1333;
+ break;
+ case 0x18:
+ dev_priv->mem_freq = 1600;
+ break;
+ default:
+ drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
+ ddrpll & 0xff);
+ dev_priv->mem_freq = 0;
+ break;
+ }
+
+ csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);
+ switch (csipll & 0x3ff) {
+ case 0x00c:
+ dev_priv->fsb_freq = 3200;
+ break;
+ case 0x00e:
+ dev_priv->fsb_freq = 3733;
+ break;
+ case 0x010:
+ dev_priv->fsb_freq = 4266;
+ break;
+ case 0x012:
+ dev_priv->fsb_freq = 4800;
+ break;
+ case 0x014:
+ dev_priv->fsb_freq = 5333;
+ break;
+ case 0x016:
+ dev_priv->fsb_freq = 5866;
+ break;
+ case 0x018:
+ dev_priv->fsb_freq = 6400;
+ break;
+ default:
+ drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
+ csipll & 0x3ff);
+ dev_priv->fsb_freq = 0;
+ break;
+ }
+}
+
+static void chv_detect_mem_freq(struct drm_i915_private *i915)
+{
+ u32 val;
+
+ vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCK));
+ val = vlv_cck_read(i915, CCK_FUSE_REG);
+ vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCK));
+
+ switch ((val >> 2) & 0x7) {
+ case 3:
+ i915->mem_freq = 2000;
+ break;
+ default:
+ i915->mem_freq = 1600;
+ break;
+ }
+}
+
+static void vlv_detect_mem_freq(struct drm_i915_private *i915)
+{
+ u32 val;
+
+ vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT));
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT));
+
+ switch ((val >> 6) & 3) {
+ case 0:
+ case 1:
+ i915->mem_freq = 800;
+ break;
+ case 2:
+ i915->mem_freq = 1066;
+ break;
+ case 3:
+ i915->mem_freq = 1333;
+ break;
+ }
+}
+
+static void detect_mem_freq(struct drm_i915_private *i915)
+{
+ if (IS_PINEVIEW(i915))
+ pnv_detect_mem_freq(i915);
+ else if (GRAPHICS_VER(i915) == 5)
+ ilk_detect_mem_freq(i915);
+ else if (IS_CHERRYVIEW(i915))
+ chv_detect_mem_freq(i915);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_detect_mem_freq(i915);
+
+ if (i915->mem_freq)
+ drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
+}
+
static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
{
return dimm->ranks * 64 / (dimm->width ?: 1);
@@ -507,6 +657,8 @@ void intel_dram_detect(struct drm_i915_private *i915)
struct dram_info *dram_info = &i915->dram_info;
int ret;
+ detect_mem_freq(i915);
+
if (GRAPHICS_VER(i915) < 9 || IS_DG2(i915) || !HAS_DISPLAY(i915))
return;
diff --git a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
index 24bc7b310367..8e6d457917da 100644
--- a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
+++ b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: 2020 Marian Cichy <M.Cichy@pengutronix.de>
+#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 6608a251106b..bb72fda9106d 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -325,23 +325,23 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
ret = meson_encoder_hdmi_init(priv);
if (ret)
- goto exit_afbcd;
+ goto unbind_all;
ret = meson_plane_create(priv);
if (ret)
- goto exit_afbcd;
+ goto unbind_all;
ret = meson_overlay_create(priv);
if (ret)
- goto exit_afbcd;
+ goto unbind_all;
ret = meson_crtc_create(priv);
if (ret)
- goto exit_afbcd;
+ goto unbind_all;
ret = request_irq(priv->vsync_irq, meson_irq, 0, drm->driver->name, drm);
if (ret)
- goto exit_afbcd;
+ goto unbind_all;
drm_mode_config_reset(drm);
@@ -359,6 +359,9 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
uninstall_irq:
free_irq(priv->vsync_irq, drm);
+unbind_all:
+ if (has_components)
+ component_unbind_all(drm->dev, drm);
exit_afbcd:
if (priv->afbcd.ops)
priv->afbcd.ops->exit(priv);
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 534621a13a34..3d046878ce6c 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -718,7 +718,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
dw_plat_data = &meson_dw_hdmi->dw_plat_data;
ret = devm_regulator_get_enable_optional(dev, "hdmi");
- if (ret < 0)
+ if (ret < 0 && ret != -ENODEV)
return ret;
meson_dw_hdmi->hdmitx_apb = devm_reset_control_get_exclusive(dev,
diff --git a/drivers/gpu/drm/meson/meson_vpp.c b/drivers/gpu/drm/meson/meson_vpp.c
index 154837688ab0..5df1957c8e41 100644
--- a/drivers/gpu/drm/meson/meson_vpp.c
+++ b/drivers/gpu/drm/meson/meson_vpp.c
@@ -100,6 +100,8 @@ void meson_vpp_init(struct meson_drm *priv)
priv->io_base + _REG(VPP_DOLBY_CTRL));
writel_relaxed(0x1020080,
priv->io_base + _REG(VPP_DUMMY_DATA1));
+ writel_relaxed(0x42020,
+ priv->io_base + _REG(VPP_DUMMY_DATA));
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
writel_relaxed(0xf, priv->io_base + _REG(DOLBY_PATH_CTRL));
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 871870ddf7ec..949b18a29a55 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -23,7 +23,6 @@ config DRM_MSM
select SHMEM
select TMPFS
select QCOM_SCM
- select DEVFREQ_GOV_SIMPLE_ONDEMAND
select WANT_DEV_COREDUMP
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index d09221f97f71..a1e006ec5dce 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -151,8 +151,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_RING(ring, 1);
/* Enable local preemption for finegrain preemption */
- OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
- OUT_RING(ring, 0x02);
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+ OUT_RING(ring, 0x1);
/* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
@@ -806,7 +806,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
/* Set the highest bank bit */
- if (adreno_is_a540(adreno_gpu))
+ if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu))
regbit = 2;
else
regbit = 1;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 7658e89844b4..f58dd564d122 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -63,7 +63,7 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
struct msm_ringbuffer *ring = gpu->rb[i];
spin_lock_irqsave(&ring->preempt_lock, flags);
- empty = (get_wptr(ring) == ring->memptrs->rptr);
+ empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
spin_unlock_irqrestore(&ring->preempt_lock, flags);
if (!empty)
@@ -207,6 +207,7 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
a5xx_gpu->preempt[i]->wptr = 0;
a5xx_gpu->preempt[i]->rptr = 0;
a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
+ a5xx_gpu->preempt[i]->rptr_addr = shadowptr(a5xx_gpu, gpu->rb[i]);
}
/* Write a 0 to signal that we aren't switching pagetables */
@@ -257,7 +258,6 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
ptr->data = 0;
ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE;
- ptr->rptr_addr = shadowptr(a5xx_gpu, ring);
ptr->counter = counters_iova;
return 0;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index f3c9600221d4..7f5bc73b2040 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -974,7 +974,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
int status, ret;
if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
- return 0;
+ return -EINVAL;
gmu->hung = false;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index aae60cbd9164..6faea5049f76 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1746,7 +1746,9 @@ static void a6xx_destroy(struct msm_gpu *gpu)
a6xx_llc_slices_destroy(a6xx_gpu);
+ mutex_lock(&a6xx_gpu->gmu.lock);
a6xx_gmu_remove(a6xx_gpu);
+ mutex_unlock(&a6xx_gpu->gmu.lock);
adreno_gpu_cleanup(adreno_gpu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 36f062c7582f..c5c4c93b3689 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -558,7 +558,8 @@ static void adreno_unbind(struct device *dev, struct device *master,
struct msm_drm_private *priv = dev_get_drvdata(master);
struct msm_gpu *gpu = dev_to_gpu(dev);
- WARN_ON_ONCE(adreno_system_suspend(dev));
+ if (pm_runtime_enabled(dev))
+ WARN_ON_ONCE(adreno_system_suspend(dev));
gpu->funcs->destroy(gpu);
priv->gpu_pdev = NULL;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index cf053e8f081e..497c9e1673ab 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -12,11 +12,15 @@
#include "dpu_hw_catalog.h"
#include "dpu_kms.h"
-#define VIG_MASK \
+#define VIG_BASE_MASK \
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
- BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) |\
+ BIT(DPU_SSPP_CDP) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
+#define VIG_MASK \
+ (VIG_BASE_MASK | \
+ BIT(DPU_SSPP_CSC_10BIT))
+
#define VIG_MSM8998_MASK \
(VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3))
@@ -26,10 +30,7 @@
#define VIG_SC7180_MASK \
(VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
-#define VIG_SM8250_MASK \
- (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE))
-
-#define VIG_QCM2290_MASK (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL))
+#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL))
#define DMA_MSM8998_MASK \
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
@@ -51,7 +52,7 @@
(DMA_MSM8998_MASK | BIT(DPU_SSPP_CURSOR))
#define MIXER_MSM8998_MASK \
- (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
+ (BIT(DPU_MIXER_SOURCESPLIT))
#define MIXER_SDM845_MASK \
(BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
@@ -314,10 +315,9 @@ static const struct dpu_caps msm8998_dpu_caps = {
};
static const struct dpu_caps qcm2290_dpu_caps = {
- .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
.max_mixer_blendstages = 0x4,
.smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
- .ubwc_version = DPU_HW_UBWC_VER_20,
.has_dim_layer = true,
.has_idle_pc = true,
.max_linewidth = 2160,
@@ -353,9 +353,9 @@ static const struct dpu_caps sc7180_dpu_caps = {
};
static const struct dpu_caps sm6115_dpu_caps = {
- .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
.max_mixer_blendstages = 0x4,
- .qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
+ .qseed_type = DPU_SSPP_SCALER_QSEED4,
.smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
.ubwc_version = DPU_HW_UBWC_VER_10,
.has_dim_layer = true,
@@ -399,7 +399,7 @@ static const struct dpu_caps sc8180x_dpu_caps = {
static const struct dpu_caps sc8280xp_dpu_caps = {
.max_mixer_width = 2560,
.max_mixer_blendstages = 11,
- .qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
+ .qseed_type = DPU_SSPP_SCALER_QSEED4,
.smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
.ubwc_version = DPU_HW_UBWC_VER_40,
.has_src_split = true,
@@ -413,7 +413,7 @@ static const struct dpu_caps sc8280xp_dpu_caps = {
static const struct dpu_caps sm8250_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
+ .qseed_type = DPU_SSPP_SCALER_QSEED4,
.smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
.ubwc_version = DPU_HW_UBWC_VER_40,
.has_src_split = true,
@@ -427,7 +427,7 @@ static const struct dpu_caps sm8250_dpu_caps = {
static const struct dpu_caps sm8350_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
+ .qseed_type = DPU_SSPP_SCALER_QSEED4,
.smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
.ubwc_version = DPU_HW_UBWC_VER_40,
.has_src_split = true,
@@ -455,7 +455,7 @@ static const struct dpu_caps sm8450_dpu_caps = {
static const struct dpu_caps sm8550_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
+ .qseed_type = DPU_SSPP_SCALER_QSEED4,
.smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
.ubwc_version = DPU_HW_UBWC_VER_40,
.has_src_split = true,
@@ -525,9 +525,9 @@ static const struct dpu_mdp_cfg sdm845_mdp[] = {
.reg_off = 0x2AC, .bit_off = 8},
.clk_ctrls[DPU_CLK_CTRL_DMA1] = {
.reg_off = 0x2B4, .bit_off = 8},
- .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .clk_ctrls[DPU_CLK_CTRL_DMA2] = {
.reg_off = 0x2BC, .bit_off = 8},
- .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .clk_ctrls[DPU_CLK_CTRL_DMA3] = {
.reg_off = 0x2C4, .bit_off = 8},
},
};
@@ -542,9 +542,9 @@ static const struct dpu_mdp_cfg sc7180_mdp[] = {
.reg_off = 0x2AC, .bit_off = 0},
.clk_ctrls[DPU_CLK_CTRL_DMA0] = {
.reg_off = 0x2AC, .bit_off = 8},
- .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
.reg_off = 0x2B4, .bit_off = 8},
- .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .clk_ctrls[DPU_CLK_CTRL_DMA2] = {
.reg_off = 0x2C4, .bit_off = 8},
.clk_ctrls[DPU_CLK_CTRL_WB2] = {
.reg_off = 0x3B8, .bit_off = 24},
@@ -569,9 +569,9 @@ static const struct dpu_mdp_cfg sc8180x_mdp[] = {
.reg_off = 0x2AC, .bit_off = 8},
.clk_ctrls[DPU_CLK_CTRL_DMA1] = {
.reg_off = 0x2B4, .bit_off = 8},
- .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .clk_ctrls[DPU_CLK_CTRL_DMA2] = {
.reg_off = 0x2BC, .bit_off = 8},
- .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .clk_ctrls[DPU_CLK_CTRL_DMA3] = {
.reg_off = 0x2C4, .bit_off = 8},
},
};
@@ -609,9 +609,9 @@ static const struct dpu_mdp_cfg sm8250_mdp[] = {
.reg_off = 0x2AC, .bit_off = 8},
.clk_ctrls[DPU_CLK_CTRL_DMA1] = {
.reg_off = 0x2B4, .bit_off = 8},
- .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .clk_ctrls[DPU_CLK_CTRL_DMA2] = {
.reg_off = 0x2BC, .bit_off = 8},
- .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .clk_ctrls[DPU_CLK_CTRL_DMA3] = {
.reg_off = 0x2C4, .bit_off = 8},
.clk_ctrls[DPU_CLK_CTRL_REG_DMA] = {
.reg_off = 0x2BC, .bit_off = 20},
@@ -638,9 +638,9 @@ static const struct dpu_mdp_cfg sm8350_mdp[] = {
.reg_off = 0x2ac, .bit_off = 8},
.clk_ctrls[DPU_CLK_CTRL_DMA1] = {
.reg_off = 0x2b4, .bit_off = 8},
- .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .clk_ctrls[DPU_CLK_CTRL_DMA2] = {
.reg_off = 0x2bc, .bit_off = 8},
- .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .clk_ctrls[DPU_CLK_CTRL_DMA3] = {
.reg_off = 0x2c4, .bit_off = 8},
.clk_ctrls[DPU_CLK_CTRL_REG_DMA] = {
.reg_off = 0x2bc, .bit_off = 20},
@@ -666,9 +666,9 @@ static const struct dpu_mdp_cfg sm8450_mdp[] = {
.reg_off = 0x2AC, .bit_off = 8},
.clk_ctrls[DPU_CLK_CTRL_DMA1] = {
.reg_off = 0x2B4, .bit_off = 8},
- .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .clk_ctrls[DPU_CLK_CTRL_DMA2] = {
.reg_off = 0x2BC, .bit_off = 8},
- .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .clk_ctrls[DPU_CLK_CTRL_DMA3] = {
.reg_off = 0x2C4, .bit_off = 8},
.clk_ctrls[DPU_CLK_CTRL_REG_DMA] = {
.reg_off = 0x2BC, .bit_off = 20},
@@ -685,9 +685,9 @@ static const struct dpu_mdp_cfg sc7280_mdp[] = {
.reg_off = 0x2AC, .bit_off = 0},
.clk_ctrls[DPU_CLK_CTRL_DMA0] = {
.reg_off = 0x2AC, .bit_off = 8},
- .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
.reg_off = 0x2B4, .bit_off = 8},
- .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .clk_ctrls[DPU_CLK_CTRL_DMA2] = {
.reg_off = 0x2C4, .bit_off = 8},
},
};
@@ -696,7 +696,7 @@ static const struct dpu_mdp_cfg sc8280xp_mdp[] = {
{
.name = "top_0", .id = MDP_TOP,
.base = 0x0, .len = 0x494,
- .features = 0,
+ .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
.highest_bank_bit = 2,
.ubwc_swizzle = 6,
.clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0},
@@ -705,8 +705,8 @@ static const struct dpu_mdp_cfg sc8280xp_mdp[] = {
.clk_ctrls[DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0},
.clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8},
.clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8},
- .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x2bc, .bit_off = 8},
- .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = { .reg_off = 0x2c4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8},
.clk_ctrls[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20},
},
};
@@ -734,9 +734,9 @@ static const struct dpu_mdp_cfg sm8550_mdp[] = {
.reg_off = 0x28330, .bit_off = 0},
.clk_ctrls[DPU_CLK_CTRL_DMA3] = {
.reg_off = 0x2a330, .bit_off = 0},
- .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .clk_ctrls[DPU_CLK_CTRL_DMA4] = {
.reg_off = 0x2c330, .bit_off = 0},
- .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .clk_ctrls[DPU_CLK_CTRL_DMA5] = {
.reg_off = 0x2e330, .bit_off = 0},
.clk_ctrls[DPU_CLK_CTRL_REG_DMA] = {
.reg_off = 0x2bc, .bit_off = 20},
@@ -828,19 +828,19 @@ static const struct dpu_ctl_cfg sdm845_ctl[] = {
static const struct dpu_ctl_cfg sc7180_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
- .base = 0x1000, .len = 0xE4,
+ .base = 0x1000, .len = 0x1dc,
.features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
},
{
.name = "ctl_1", .id = CTL_1,
- .base = 0x1200, .len = 0xE4,
+ .base = 0x1200, .len = 0x1dc,
.features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
},
{
.name = "ctl_2", .id = CTL_2,
- .base = 0x1400, .len = 0xE4,
+ .base = 0x1400, .len = 0x1dc,
.features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
},
@@ -1190,9 +1190,9 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_MSM8998_MASK,
sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_MSM8998_MASK,
- sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_MSM8998_MASK,
- sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+ sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
};
static const struct dpu_sspp_cfg sdm845_sspp[] = {
@@ -1209,9 +1209,9 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = {
SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+ sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
};
static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 =
@@ -1226,57 +1226,57 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+ sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
};
static const struct dpu_sspp_sub_blks sm6115_vig_sblk_0 =
- _VIG_SBLK("0", 2, DPU_SSPP_SCALER_QSEED3LITE);
+ _VIG_SBLK("0", 2, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_cfg sm6115_sspp[] = {
- SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SM8250_MASK,
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
sm6115_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
};
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 =
- _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3LITE);
+ _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 =
- _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3LITE);
+ _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 =
- _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3LITE);
+ _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 =
- _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3LITE);
+ _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_cfg sm8250_sspp[] = {
- SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SM8250_MASK,
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
sm8250_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
- SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SM8250_MASK,
+ SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SC7180_MASK,
sm8250_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
- SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SM8250_MASK,
+ SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SC7180_MASK,
sm8250_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
- SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SM8250_MASK,
+ SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SC7180_MASK,
sm8250_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+ sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
};
static const struct dpu_sspp_sub_blks sm8450_vig_sblk_0 =
- _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3LITE);
+ _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8450_vig_sblk_1 =
- _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3LITE);
+ _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8450_vig_sblk_2 =
- _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3LITE);
+ _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8450_vig_sblk_3 =
- _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3LITE);
+ _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_cfg sm8450_sspp[] = {
SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
@@ -1292,21 +1292,21 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+ sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
};
static const struct dpu_sspp_sub_blks sm8550_vig_sblk_0 =
- _VIG_SBLK("0", 7, DPU_SSPP_SCALER_QSEED3LITE);
+ _VIG_SBLK("0", 7, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8550_vig_sblk_1 =
- _VIG_SBLK("1", 8, DPU_SSPP_SCALER_QSEED3LITE);
+ _VIG_SBLK("1", 8, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8550_vig_sblk_2 =
- _VIG_SBLK("2", 9, DPU_SSPP_SCALER_QSEED3LITE);
+ _VIG_SBLK("2", 9, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8550_vig_sblk_3 =
- _VIG_SBLK("3", 10, DPU_SSPP_SCALER_QSEED3LITE);
+ _VIG_SBLK("3", 10, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8550_dma_sblk_4 = _DMA_SBLK("12", 5);
-static const struct dpu_sspp_sub_blks sd8550_dma_sblk_5 = _DMA_SBLK("13", 6);
+static const struct dpu_sspp_sub_blks sm8550_dma_sblk_5 = _DMA_SBLK("13", 6);
static const struct dpu_sspp_cfg sm8550_sspp[] = {
SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
@@ -1326,9 +1326,9 @@ static const struct dpu_sspp_cfg sm8550_sspp[] = {
SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_SDM845_MASK,
sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
SSPP_BLK("sspp_12", SSPP_DMA4, 0x2c000, DMA_CURSOR_SDM845_MASK,
- sm8550_dma_sblk_4, 14, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+ sm8550_dma_sblk_4, 14, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA4),
SSPP_BLK("sspp_13", SSPP_DMA5, 0x2e000, DMA_CURSOR_SDM845_MASK,
- sd8550_dma_sblk_5, 15, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+ sm8550_dma_sblk_5, 15, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA5),
};
static const struct dpu_sspp_cfg sc7280_sspp[] = {
@@ -1337,37 +1337,37 @@ static const struct dpu_sspp_cfg sc7280_sspp[] = {
SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+ sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
};
static const struct dpu_sspp_sub_blks sc8280xp_vig_sblk_0 =
- _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3LITE);
+ _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sc8280xp_vig_sblk_1 =
- _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3LITE);
+ _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sc8280xp_vig_sblk_2 =
- _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3LITE);
+ _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sc8280xp_vig_sblk_3 =
- _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3LITE);
+ _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
- SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SM8250_MASK,
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
sc8280xp_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
- SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SM8250_MASK,
+ SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SC7180_MASK,
sc8280xp_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
- SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SM8250_MASK,
+ SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SC7180_MASK,
sc8280xp_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
- SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SM8250_MASK,
+ SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SC7180_MASK,
sc8280xp_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+ sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
};
#define _VIG_SBLK_NOSCALE(num, sdma_pri) \
@@ -1517,7 +1517,7 @@ static const struct dpu_lm_cfg sc7280_lm[] = {
/* QCM2290 */
static const struct dpu_lm_sub_blks qcm2290_lm_sblk = {
- .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxwidth = DEFAULT_DPU_LINE_WIDTH,
.maxblendstages = 4, /* excluding base layer */
.blendstage_base = { /* offsets relative to mixer base */
0x20, 0x38, 0x50, 0x68
@@ -1714,7 +1714,7 @@ static const struct dpu_pingpong_cfg sm8350_pp[] = {
};
static const struct dpu_pingpong_cfg sc7280_pp[] = {
- PP_BLK("pingpong_0", PINGPONG_0, 0x59000, 0, sc7280_pp_sblk, -1, -1),
+ PP_BLK("pingpong_0", PINGPONG_0, 0x69000, 0, sc7280_pp_sblk, -1, -1),
PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1),
PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1),
PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1),
@@ -2841,8 +2841,6 @@ static const struct dpu_mdss_cfg qcm2290_dpu_cfg = {
.intf = qcm2290_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
.vbif = sdm845_vbif,
- .reg_dma_count = 1,
- .dma_cfg = &sdm845_regdma,
.perf = &qcm2290_perf_data,
.mdss_irqs = IRQ_SC7180_MASK,
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index ddab9caebb18..e6590302b3bf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -515,6 +515,8 @@ enum dpu_clk_ctrl_type {
DPU_CLK_CTRL_DMA1,
DPU_CLK_CTRL_DMA2,
DPU_CLK_CTRL_DMA3,
+ DPU_CLK_CTRL_DMA4,
+ DPU_CLK_CTRL_DMA5,
DPU_CLK_CTRL_CURSOR0,
DPU_CLK_CTRL_CURSOR1,
DPU_CLK_CTRL_INLINE_ROT0_SSPP,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index b88a2f3724e6..6c53ea560ffa 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -446,7 +446,9 @@ static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	 * CTL_LAYER has a 3-bit field (and extra bits in the EXT register),
	 * all EXT registers have 4-bit fields.
*/
- if (cfg->idx == 0) {
+ if (cfg->idx == -1) {
+ continue;
+ } else if (cfg->idx == 0) {
mixercfg[0] |= mix << cfg->shift;
mixercfg[1] |= ext << cfg->ext_shift;
} else {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 396429e63756..66c1b70d244f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -577,6 +577,8 @@ void dpu_rm_release(struct dpu_global_state *global_state,
ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
_dpu_rm_clear_mapping(global_state->dsc_to_enc_id,
ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
+ _dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
+ ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
}
int dpu_rm_reserve(
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 051bdbc093cf..f38296ad8743 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -107,6 +107,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
bool (*shrink)(struct drm_gem_object *obj);
bool cond;
unsigned long freed;
+ unsigned long remaining;
} stages[] = {
/* Stages of progressively more aggressive/expensive reclaim: */
{ &priv->lru.dontneed, purge, true },
@@ -116,14 +117,18 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
};
long nr = sc->nr_to_scan;
unsigned long freed = 0;
+ unsigned long remaining = 0;
for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
if (!stages[i].cond)
continue;
stages[i].freed =
- drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink);
+ drm_gem_lru_scan(stages[i].lru, nr,
+ &stages[i].remaining,
+ stages[i].shrink);
nr -= stages[i].freed;
freed += stages[i].freed;
+ remaining += stages[i].remaining;
}
if (freed) {
@@ -132,7 +137,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
stages[3].freed);
}
- return (freed > 0) ? freed : SHRINK_STOP;
+ return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
}
#ifdef CONFIG_DEBUG_FS
@@ -182,10 +187,12 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
NULL,
};
unsigned idx, unmapped = 0;
+ unsigned long remaining = 0;
for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
unmapped += drm_gem_lru_scan(lrus[idx],
vmap_shrink_limit - unmapped,
+ &remaining,
vmap_shrink);
}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index be4bf77103cd..ac8ed731f76d 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -637,8 +637,8 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
int ret = 0;
uint32_t i, j;
- post_deps = kmalloc_array(nr_syncobjs, sizeof(*post_deps),
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps),
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
if (!post_deps)
return ERR_PTR(-ENOMEM);
@@ -653,7 +653,6 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
}
post_deps[i].point = syncobj_desc.point;
- post_deps[i].chain = NULL;
if (syncobj_desc.flags) {
ret = -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index c5a4f49ee206..01a22a13b452 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -2,6 +2,7 @@
#ifndef __NVKM_FB_H__
#define __NVKM_FB_H__
#include <core/subdev.h>
+#include <core/falcon.h>
#include <core/mm.h>
/* memory type/access flags, do not match hardware values */
@@ -33,7 +34,7 @@ struct nvkm_fb {
const struct nvkm_fb_func *func;
struct nvkm_subdev subdev;
- struct nvkm_blob vpr_scrubber;
+ struct nvkm_falcon_fw vpr_scrubber;
struct {
struct page *flush_page;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index bac7dcc4c2c1..0955340cc421 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -143,6 +143,10 @@ nvkm_fb_mem_unlock(struct nvkm_fb *fb)
if (!fb->func->vpr.scrub_required)
return 0;
+ ret = nvkm_subdev_oneinit(subdev);
+ if (ret)
+ return ret;
+
if (!fb->func->vpr.scrub_required(fb)) {
nvkm_debug(subdev, "VPR not locked\n");
return 0;
@@ -150,7 +154,7 @@ nvkm_fb_mem_unlock(struct nvkm_fb *fb)
nvkm_debug(subdev, "VPR locked, running scrubber binary\n");
- if (!fb->vpr_scrubber.size) {
+ if (!fb->vpr_scrubber.fw.img) {
nvkm_warn(subdev, "VPR locked, but no scrubber binary!\n");
return 0;
}
@@ -229,7 +233,7 @@ nvkm_fb_dtor(struct nvkm_subdev *subdev)
nvkm_ram_del(&fb->ram);
- nvkm_blob_dtor(&fb->vpr_scrubber);
+ nvkm_falcon_fw_dtor(&fb->vpr_scrubber);
if (fb->sysmem.flush_page) {
dma_unmap_page(subdev->device->dev, fb->sysmem.flush_page_addr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
index 5098f219e3e6..a7456e786463 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
@@ -37,5 +37,5 @@ ga100_fb = {
int
ga100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gp102_fb_new_(&ga100_fb, device, type, inst, pfb);
+ return gf100_fb_new_(&ga100_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
index 5a21b0ae4595..dd476e079fe1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
@@ -25,25 +25,20 @@
#include <engine/nvdec.h>
static int
-ga102_fb_vpr_scrub(struct nvkm_fb *fb)
+ga102_fb_oneinit(struct nvkm_fb *fb)
{
- struct nvkm_falcon_fw fw = {};
- int ret;
+ struct nvkm_subdev *subdev = &fb->subdev;
- ret = nvkm_falcon_fw_ctor_hs_v2(&ga102_flcn_fw, "mem-unlock", &fb->subdev, "nvdec/scrubber",
- 0, &fb->subdev.device->nvdec[0]->falcon, &fw);
- if (ret)
- return ret;
+ nvkm_falcon_fw_ctor_hs_v2(&ga102_flcn_fw, "mem-unlock", subdev, "nvdec/scrubber",
+ 0, &subdev->device->nvdec[0]->falcon, &fb->vpr_scrubber);
- ret = nvkm_falcon_fw_boot(&fw, &fb->subdev, true, NULL, NULL, 0, 0);
- nvkm_falcon_fw_dtor(&fw);
- return ret;
+ return gf100_fb_oneinit(fb);
}
static const struct nvkm_fb_func
ga102_fb = {
.dtor = gf100_fb_dtor,
- .oneinit = gf100_fb_oneinit,
+ .oneinit = ga102_fb_oneinit,
.init = gm200_fb_init,
.init_page = gv100_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
@@ -51,13 +46,13 @@ ga102_fb = {
.ram_new = ga102_ram_new,
.default_bigpage = 16,
.vpr.scrub_required = tu102_fb_vpr_scrub_required,
- .vpr.scrub = ga102_fb_vpr_scrub,
+ .vpr.scrub = gp102_fb_vpr_scrub,
};
int
ga102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gp102_fb_new_(&ga102_fb, device, type, inst, pfb);
+ return gf100_fb_new_(&ga102_fb, device, type, inst, pfb);
}
MODULE_FIRMWARE("nvidia/ga102/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
index 2658481d575b..14d942e8b857 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
@@ -29,18 +29,7 @@
int
gp102_fb_vpr_scrub(struct nvkm_fb *fb)
{
- struct nvkm_subdev *subdev = &fb->subdev;
- struct nvkm_falcon_fw fw = {};
- int ret;
-
- ret = nvkm_falcon_fw_ctor_hs(&gm200_flcn_fw, "mem-unlock", subdev, NULL,
- "nvdec/scrubber", 0, &subdev->device->nvdec[0]->falcon, &fw);
- if (ret)
- return ret;
-
- ret = nvkm_falcon_fw_boot(&fw, subdev, true, NULL, NULL, 0, 0x00000000);
- nvkm_falcon_fw_dtor(&fw);
- return ret;
+ return nvkm_falcon_fw_boot(&fb->vpr_scrubber, &fb->subdev, true, NULL, NULL, 0, 0x00000000);
}
bool
@@ -51,10 +40,21 @@ gp102_fb_vpr_scrub_required(struct nvkm_fb *fb)
return (nvkm_rd32(device, 0x100cd0) & 0x00000010) != 0;
}
+int
+gp102_fb_oneinit(struct nvkm_fb *fb)
+{
+ struct nvkm_subdev *subdev = &fb->subdev;
+
+ nvkm_falcon_fw_ctor_hs(&gm200_flcn_fw, "mem-unlock", subdev, NULL, "nvdec/scrubber",
+ 0, &subdev->device->nvdec[0]->falcon, &fb->vpr_scrubber);
+
+ return gf100_fb_oneinit(fb);
+}
+
static const struct nvkm_fb_func
gp102_fb = {
.dtor = gf100_fb_dtor,
- .oneinit = gf100_fb_oneinit,
+ .oneinit = gp102_fb_oneinit,
.init = gm200_fb_init,
.init_remapper = gp100_fb_init_remapper,
.init_page = gm200_fb_init_page,
@@ -65,22 +65,9 @@ gp102_fb = {
};
int
-gp102_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
-{
- int ret = gf100_fb_new_(func, device, type, inst, pfb);
- if (ret)
- return ret;
-
- nvkm_firmware_load_blob(&(*pfb)->subdev, "nvdec/scrubber", "", 0,
- &(*pfb)->vpr_scrubber);
- return 0;
-}
-
-int
gp102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gp102_fb_new_(&gp102_fb, device, type, inst, pfb);
+ return gf100_fb_new_(&gp102_fb, device, type, inst, pfb);
}
MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
index 0e3c0a8f5d71..4d8a286a7a34 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
@@ -31,7 +31,7 @@ gv100_fb_init_page(struct nvkm_fb *fb)
static const struct nvkm_fb_func
gv100_fb = {
.dtor = gf100_fb_dtor,
- .oneinit = gf100_fb_oneinit,
+ .oneinit = gp102_fb_oneinit,
.init = gm200_fb_init,
.init_page = gv100_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
@@ -45,7 +45,7 @@ gv100_fb = {
int
gv100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gp102_fb_new_(&gv100_fb, device, type, inst, pfb);
+ return gf100_fb_new_(&gv100_fb, device, type, inst, pfb);
}
MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index f517751f94ac..726c30c8bf95 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -83,8 +83,7 @@ int gm200_fb_init_page(struct nvkm_fb *);
void gp100_fb_init_remapper(struct nvkm_fb *);
void gp100_fb_init_unkn(struct nvkm_fb *);
-int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
- struct nvkm_fb **);
+int gp102_fb_oneinit(struct nvkm_fb *);
bool gp102_fb_vpr_scrub_required(struct nvkm_fb *);
int gp102_fb_vpr_scrub(struct nvkm_fb *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
index be82af0364ee..b8803c124c3b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
@@ -31,7 +31,7 @@ tu102_fb_vpr_scrub_required(struct nvkm_fb *fb)
static const struct nvkm_fb_func
tu102_fb = {
.dtor = gf100_fb_dtor,
- .oneinit = gf100_fb_oneinit,
+ .oneinit = gp102_fb_oneinit,
.init = gm200_fb_init,
.init_page = gv100_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
@@ -45,7 +45,7 @@ tu102_fb = {
int
tu102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gp102_fb_new_(&tu102_fb, device, type, inst, pfb);
+ return gf100_fb_new_(&tu102_fb, device, type, inst, pfb);
}
MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 4e83a1891f3e..666a5e53fe19 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -282,7 +282,7 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
if (pm_runtime_active(pfdev->dev))
mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
- pm_runtime_put_sync_autosuspend(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->dev);
}
static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 008e172ed43b..d6d29be6b4f4 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -298,13 +298,26 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
escr = params.escr;
}
- if (rcdu->info->gen < 4) {
+ /*
+ * The ESCR register only exists in DU channels that can output to an
+	 * LVDS or DPAD, and the OTAR register in DU channels that can output
+ * to a DPAD.
+ */
+ if ((rcdu->info->routes[RCAR_DU_OUTPUT_DPAD0].possible_crtcs |
+ rcdu->info->routes[RCAR_DU_OUTPUT_DPAD1].possible_crtcs |
+ rcdu->info->routes[RCAR_DU_OUTPUT_LVDS0].possible_crtcs |
+ rcdu->info->routes[RCAR_DU_OUTPUT_LVDS1].possible_crtcs) &
+ BIT(rcrtc->index)) {
dev_dbg(rcrtc->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr);
rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? ESCR13 : ESCR02, escr);
- rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? OTAR13 : OTAR02, 0);
}
+ if ((rcdu->info->routes[RCAR_DU_OUTPUT_DPAD0].possible_crtcs |
+ rcdu->info->routes[RCAR_DU_OUTPUT_DPAD1].possible_crtcs) &
+ BIT(rcrtc->index))
+ rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? OTAR13 : OTAR02, 0);
+
/* Signal polarities */
dsmr = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
| ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0)
@@ -749,16 +762,17 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
/*
* On D3/E3 the dot clock is provided by the LVDS encoder attached to
- * the DU channel. We need to enable its clock output explicitly if
- * the LVDS output is disabled.
+ * the DU channel. We need to enable its clock output explicitly before
+ * starting the CRTC, as the bridge hasn't been enabled by the atomic
+ * helpers yet.
*/
- if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
- rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
+ if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index)) {
+ bool dot_clk_only = rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0);
struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];
const struct drm_display_mode *mode =
&crtc->state->adjusted_mode;
- rcar_lvds_pclk_enable(bridge, mode->clock * 1000);
+ rcar_lvds_pclk_enable(bridge, mode->clock * 1000, dot_clk_only);
}
/*
@@ -795,15 +809,16 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
rcar_du_crtc_stop(rcrtc);
rcar_du_crtc_put(rcrtc);
- if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
- rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
+ if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index)) {
+ bool dot_clk_only = rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0);
struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];
/*
* Disable the LVDS clock output, see
- * rcar_du_crtc_atomic_enable().
+ * rcar_du_crtc_atomic_enable(). When the LVDS output is used,
+ * this also disables the LVDS encoder.
*/
- rcar_lvds_pclk_disable(bridge);
+ rcar_lvds_pclk_disable(bridge, dot_clk_only);
}
if ((rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) &&
@@ -815,7 +830,6 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
* Disable the DSI clock output, see
* rcar_du_crtc_atomic_enable().
*/
-
rcar_mipi_dsi_pclk_disable(bridge);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index b1787be31e92..7ecec7b04a8d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -109,8 +109,8 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
renc = drmm_encoder_alloc(&rcdu->ddev, struct rcar_du_encoder, base,
&rcar_du_encoder_funcs, DRM_MODE_ENCODER_NONE,
NULL);
- if (!renc)
- return -ENOMEM;
+ if (IS_ERR(renc))
+ return PTR_ERR(renc);
renc->output = output;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index 152602236377..2ccd2581f544 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -138,6 +138,7 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp)
{
struct rcar_du_device *rcdu = rgrp->dev;
u32 defr7 = DEFR7_CODE;
+ u32 dorcr;
/* Enable extended features */
rcar_du_group_write(rgrp, DEFR, DEFR_CODE | DEFR_DEFE);
@@ -174,8 +175,15 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp)
/*
	 * Use DS1PR and DS2PR to configure plane priorities and connect
	 * superposition 0 to the DU0 pins. DU1 pins will be configured dynamically.
+ *
+ * Groups that have a single channel have a hardcoded configuration. On
+ * Gen3 and newer, the documentation requires PG1T, DK1S and PG1D_DS1 to
+ * always be set in this case.
*/
- rcar_du_group_write(rgrp, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS);
+ dorcr = DORCR_PG0D_DS0 | DORCR_DPRS;
+ if (rcdu->info->gen >= 3 && rgrp->num_crtcs == 1)
+ dorcr |= DORCR_PG1T | DORCR_DK1S | DORCR_PG1D_DS1;
+ rcar_du_group_write(rgrp, DORCR, dorcr);
/* Apply planes to CRTCs association. */
mutex_lock(&rgrp->lock);
@@ -349,7 +357,7 @@ int rcar_du_group_set_routing(struct rcar_du_group *rgrp)
struct rcar_du_device *rcdu = rgrp->dev;
u32 dorcr = rcar_du_group_read(rgrp, DORCR);
- dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK);
+ dorcr &= ~(DORCR_PG1T | DORCR_DK1S | DORCR_PG1D_MASK);
/*
* Set the DPAD1 pins sources. Select CRTC 0 if explicitly requested and
@@ -357,9 +365,9 @@ int rcar_du_group_set_routing(struct rcar_du_group *rgrp)
* by default.
*/
if (rcdu->dpad1_source == rgrp->index * 2)
- dorcr |= DORCR_PG2D_DS1;
+ dorcr |= DORCR_PG1D_DS0;
else
- dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2;
+ dorcr |= DORCR_PG1T | DORCR_DK1S | DORCR_PG1D_DS1;
rcar_du_group_write(rgrp, DORCR, dorcr);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index 789ae9285108..6c750fab6ebb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -511,19 +511,19 @@
*/
#define DORCR 0x11000
-#define DORCR_PG2T (1 << 30)
-#define DORCR_DK2S (1 << 28)
-#define DORCR_PG2D_DS1 (0 << 24)
-#define DORCR_PG2D_DS2 (1 << 24)
-#define DORCR_PG2D_FIX0 (2 << 24)
-#define DORCR_PG2D_DOOR (3 << 24)
-#define DORCR_PG2D_MASK (3 << 24)
-#define DORCR_DR1D (1 << 21)
-#define DORCR_PG1D_DS1 (0 << 16)
-#define DORCR_PG1D_DS2 (1 << 16)
-#define DORCR_PG1D_FIX0 (2 << 16)
-#define DORCR_PG1D_DOOR (3 << 16)
-#define DORCR_PG1D_MASK (3 << 16)
+#define DORCR_PG1T (1 << 30)
+#define DORCR_DK1S (1 << 28)
+#define DORCR_PG1D_DS0 (0 << 24)
+#define DORCR_PG1D_DS1 (1 << 24)
+#define DORCR_PG1D_FIX0 (2 << 24)
+#define DORCR_PG1D_DOOR (3 << 24)
+#define DORCR_PG1D_MASK (3 << 24)
+#define DORCR_DR0D (1 << 21)
+#define DORCR_PG0D_DS0 (0 << 16)
+#define DORCR_PG0D_DS1 (1 << 16)
+#define DORCR_PG0D_FIX0 (2 << 16)
+#define DORCR_PG0D_DOOR (3 << 16)
+#define DORCR_PG0D_MASK (3 << 16)
#define DORCR_RGPV (1 << 4)
#define DORCR_DPRS (1 << 0)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index fe90be51d64e..45c05d0ffc70 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -73,7 +73,7 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
.src.y2 = mode->vdisplay << 16,
.zpos = 0,
},
- .format = rcar_du_format_info(DRM_FORMAT_ARGB8888),
+ .format = rcar_du_format_info(DRM_FORMAT_XRGB8888),
.source = RCAR_DU_PLANE_VSPD1,
.colorkey = 0,
};
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 260ea5d8624e..ca215b588fd7 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -269,8 +269,8 @@ done:
pll->pll_m, pll->pll_n, pll->pll_e, pll->div);
}
-static void __rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds,
- unsigned int freq, bool dot_clock_only)
+static void rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds,
+ unsigned int freq, bool dot_clock_only)
{
struct pll_info pll = { .diff = (unsigned long)-1 };
u32 lvdpllcr;
@@ -305,52 +305,8 @@ static void __rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds,
rcar_lvds_write(lvds, LVDDIV, 0);
}
-static void rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds, unsigned int freq)
-{
- __rcar_lvds_pll_setup_d3_e3(lvds, freq, false);
-}
-
/* -----------------------------------------------------------------------------
- * Clock - D3/E3 only
- */
-
-int rcar_lvds_pclk_enable(struct drm_bridge *bridge, unsigned long freq)
-{
- struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
- int ret;
-
- if (WARN_ON(!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)))
- return -ENODEV;
-
- dev_dbg(lvds->dev, "enabling LVDS PLL, freq=%luHz\n", freq);
-
- ret = pm_runtime_resume_and_get(lvds->dev);
- if (ret)
- return ret;
-
- __rcar_lvds_pll_setup_d3_e3(lvds, freq, true);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rcar_lvds_pclk_enable);
-
-void rcar_lvds_pclk_disable(struct drm_bridge *bridge)
-{
- struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
-
- if (WARN_ON(!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)))
- return;
-
- dev_dbg(lvds->dev, "disabling LVDS PLL\n");
-
- rcar_lvds_write(lvds, LVDPLLCR, 0);
-
- pm_runtime_put_sync(lvds->dev);
-}
-EXPORT_SYMBOL_GPL(rcar_lvds_pclk_disable);
-
-/* -----------------------------------------------------------------------------
- * Bridge
+ * Enable/disable
*/
static enum rcar_lvds_mode rcar_lvds_get_lvds_mode(struct rcar_lvds *lvds,
@@ -394,10 +350,10 @@ static enum rcar_lvds_mode rcar_lvds_get_lvds_mode(struct rcar_lvds *lvds,
return mode;
}
-static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state,
- struct drm_crtc *crtc,
- struct drm_connector *connector)
+static void rcar_lvds_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_connector *connector)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
u32 lvdhcr;
@@ -410,8 +366,7 @@ static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge,
/* Enable the companion LVDS encoder in dual-link mode. */
if (lvds->link_type != RCAR_LVDS_SINGLE_LINK && lvds->companion)
- __rcar_lvds_atomic_enable(lvds->companion, state, crtc,
- connector);
+ rcar_lvds_enable(lvds->companion, state, crtc, connector);
/*
* Hardcode the channels and control signals routing for now.
@@ -465,8 +420,12 @@ static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge,
/*
* PLL clock configuration on all instances but the companion in
* dual-link mode.
+ *
+ * The extended PLL has been turned on by an explicit call to
+ * rcar_lvds_pclk_enable() from the DU driver.
*/
- if (lvds->link_type == RCAR_LVDS_SINGLE_LINK || lvds->companion) {
+ if ((lvds->link_type == RCAR_LVDS_SINGLE_LINK || lvds->companion) &&
+ !(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)) {
const struct drm_crtc_state *crtc_state =
drm_atomic_get_new_crtc_state(state, crtc);
const struct drm_display_mode *mode =
@@ -531,22 +490,7 @@ static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge,
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
}
-static void rcar_lvds_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
-{
- struct drm_atomic_state *state = old_bridge_state->base.state;
- struct drm_connector *connector;
- struct drm_crtc *crtc;
-
- connector = drm_atomic_get_new_connector_for_encoder(state,
- bridge->encoder);
- crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
-
- __rcar_lvds_atomic_enable(bridge, state, crtc, connector);
-}
-
-static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void rcar_lvds_disable(struct drm_bridge *bridge)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
u32 lvdcr0;
@@ -578,15 +522,99 @@ static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
rcar_lvds_write(lvds, LVDCR0, 0);
rcar_lvds_write(lvds, LVDCR1, 0);
- rcar_lvds_write(lvds, LVDPLLCR, 0);
+
+ /* The extended PLL is turned off in rcar_lvds_pclk_disable(). */
+ if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL))
+ rcar_lvds_write(lvds, LVDPLLCR, 0);
/* Disable the companion LVDS encoder in dual-link mode. */
if (lvds->link_type != RCAR_LVDS_SINGLE_LINK && lvds->companion)
- lvds->companion->funcs->atomic_disable(lvds->companion,
- old_bridge_state);
+ rcar_lvds_disable(lvds->companion);
+
+ pm_runtime_put_sync(lvds->dev);
+}
+
+/* -----------------------------------------------------------------------------
+ * Clock - D3/E3 only
+ */
+
+int rcar_lvds_pclk_enable(struct drm_bridge *bridge, unsigned long freq,
+ bool dot_clk_only)
+{
+ struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+ int ret;
+
+ if (WARN_ON(!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)))
+ return -ENODEV;
+
+ dev_dbg(lvds->dev, "enabling LVDS PLL, freq=%luHz\n", freq);
+
+ ret = pm_runtime_resume_and_get(lvds->dev);
+ if (ret)
+ return ret;
+
+ rcar_lvds_pll_setup_d3_e3(lvds, freq, dot_clk_only);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rcar_lvds_pclk_enable);
+
+void rcar_lvds_pclk_disable(struct drm_bridge *bridge, bool dot_clk_only)
+{
+ struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+
+ if (WARN_ON(!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)))
+ return;
+
+ dev_dbg(lvds->dev, "disabling LVDS PLL\n");
+
+ if (!dot_clk_only)
+ rcar_lvds_disable(bridge);
+
+ rcar_lvds_write(lvds, LVDPLLCR, 0);
pm_runtime_put_sync(lvds->dev);
}
+EXPORT_SYMBOL_GPL(rcar_lvds_pclk_disable);
+
+/* -----------------------------------------------------------------------------
+ * Bridge
+ */
+
+static void rcar_lvds_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct drm_atomic_state *state = old_bridge_state->base.state;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
+
+ rcar_lvds_enable(bridge, state, crtc, connector);
+}
+
+static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+
+ /*
+ * For D3 and E3, disabling the LVDS encoder before the DU would stall
+ * the DU, causing a vblank wait timeout when stopping the DU. This has
+ * been traced to clearing the LVEN bit, but the exact reason is
+ * unknown. Keep the encoder enabled; it will be disabled by an explicit
+ * call to rcar_lvds_pclk_disable() from the DU driver.
+ *
+ * We could clear the LVRES bit already to disable the LVDS output, but
+ * that's likely pointless.
+ */
+ if (lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)
+ return;
+
+ rcar_lvds_disable(bridge);
+}
static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
@@ -922,14 +950,12 @@ static const struct rcar_lvds_device_info rcar_lvds_r8a77990_info = {
.gen = 3,
.quirks = RCAR_LVDS_QUIRK_GEN3_LVEN | RCAR_LVDS_QUIRK_EXT_PLL
| RCAR_LVDS_QUIRK_DUAL_LINK,
- .pll_setup = rcar_lvds_pll_setup_d3_e3,
};
static const struct rcar_lvds_device_info rcar_lvds_r8a77995_info = {
.gen = 3,
.quirks = RCAR_LVDS_QUIRK_GEN3_LVEN | RCAR_LVDS_QUIRK_PWD
| RCAR_LVDS_QUIRK_EXT_PLL | RCAR_LVDS_QUIRK_DUAL_LINK,
- .pll_setup = rcar_lvds_pll_setup_d3_e3,
};
static const struct of_device_id rcar_lvds_of_table[] = {
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.h b/drivers/gpu/drm/rcar-du/rcar_lvds.h
index bee7033b60d6..887c63500000 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.h
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.h
@@ -13,17 +13,21 @@
struct drm_bridge;
#if IS_ENABLED(CONFIG_DRM_RCAR_LVDS)
-int rcar_lvds_pclk_enable(struct drm_bridge *bridge, unsigned long freq);
-void rcar_lvds_pclk_disable(struct drm_bridge *bridge);
+int rcar_lvds_pclk_enable(struct drm_bridge *bridge, unsigned long freq,
+ bool dot_clk_only);
+void rcar_lvds_pclk_disable(struct drm_bridge *bridge, bool dot_clk_only);
bool rcar_lvds_dual_link(struct drm_bridge *bridge);
bool rcar_lvds_is_connected(struct drm_bridge *bridge);
#else
static inline int rcar_lvds_pclk_enable(struct drm_bridge *bridge,
- unsigned long freq)
+ unsigned long freq, bool dot_clk_only)
{
return -ENOSYS;
}
-static inline void rcar_lvds_pclk_disable(struct drm_bridge *bridge) { }
+static inline void rcar_lvds_pclk_disable(struct drm_bridge *bridge,
+ bool dot_clock_only)
+{
+}
static inline bool rcar_lvds_dual_link(struct drm_bridge *bridge)
{
return false;
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 7fd869520ef2..fe9c6468e440 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -123,6 +123,37 @@ static void drm_sched_fence_release_finished(struct dma_fence *f)
dma_fence_put(&fence->scheduled);
}
+static void drm_sched_fence_set_deadline_finished(struct dma_fence *f,
+ ktime_t deadline)
+{
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
+ struct dma_fence *parent;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fence->lock, flags);
+
+ /* If we already have an earlier deadline, keep it: */
+ if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags) &&
+ ktime_before(fence->deadline, deadline)) {
+ spin_unlock_irqrestore(&fence->lock, flags);
+ return;
+ }
+
+ fence->deadline = deadline;
+ set_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags);
+
+ spin_unlock_irqrestore(&fence->lock, flags);
+
+ /*
+ * smp_load_acquire() to ensure that if we are racing another
+ * thread calling drm_sched_fence_set_parent(), we see the
+ * parent set before it calls test_bit(HAS_DEADLINE_BIT)
+ */
+ parent = smp_load_acquire(&fence->parent);
+ if (parent)
+ dma_fence_set_deadline(parent, deadline);
+}
+
static const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
.get_driver_name = drm_sched_fence_get_driver_name,
.get_timeline_name = drm_sched_fence_get_timeline_name,
@@ -133,6 +164,7 @@ static const struct dma_fence_ops drm_sched_fence_ops_finished = {
.get_driver_name = drm_sched_fence_get_driver_name,
.get_timeline_name = drm_sched_fence_get_timeline_name,
.release = drm_sched_fence_release_finished,
+ .set_deadline = drm_sched_fence_set_deadline_finished,
};
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
@@ -147,6 +179,20 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
}
EXPORT_SYMBOL(to_drm_sched_fence);
+void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
+ struct dma_fence *fence)
+{
+ /*
+ * smp_store_release() to ensure another thread racing us
+ * in drm_sched_fence_set_deadline_finished() sees the
+ * fence's parent set before test_bit()
+ */
+ smp_store_release(&s_fence->parent, dma_fence_get(fence));
+ if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
+ &s_fence->finished.flags))
+ dma_fence_set_deadline(fence, s_fence->deadline);
+}
+
struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
void *owner)
{
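The two comments above describe a publish/re-check handshake: drm_sched_fence_set_parent() stores the parent with release semantics and then re-checks the deadline flag, while the deadline path records the deadline and then loads the parent with acquire semantics, so at least one side notices the other. A stand-alone sketch of the same idea is below; it uses sequentially consistent C11 atomics for simplicity (the kernel instead pairs smp_store_release()/smp_load_acquire() with the fence spinlock), and all names in it are hypothetical.

/*
 * Illustrative userspace sketch of the "publish pointer, then re-check flag"
 * handshake. Not kernel code; names and types are stand-ins.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_fence {
	_Atomic(struct fake_fence *) parent;
	atomic_bool has_deadline;
	long deadline;
};

/* Scheduler side: publish the parent, then look for a pending deadline. */
static void set_parent(struct fake_fence *f, struct fake_fence *parent)
{
	atomic_store(&f->parent, parent);
	if (atomic_load(&f->has_deadline))
		printf("propagating deadline %ld to parent\n", f->deadline);
}

/* Consumer side: record the deadline, then look for an existing parent. */
static void set_deadline(struct fake_fence *f, long deadline)
{
	f->deadline = deadline;
	atomic_store(&f->has_deadline, true);
	if (atomic_load(&f->parent))
		printf("propagating deadline %ld to parent\n", deadline);
}

int main(void)
{
	struct fake_fence parent = { 0 }, fence = { 0 };

	set_deadline(&fence, 100);	/* no parent yet: deadline only recorded */
	set_parent(&fence, &parent);	/* sees the pending deadline and forwards it */
	return 0;
}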
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 7db586e6fce6..f6801eb21820 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -1048,7 +1048,7 @@ static int drm_sched_main(void *param)
drm_sched_fence_scheduled(s_fence);
if (!IS_ERR_OR_NULL(fence)) {
- s_fence->parent = dma_fence_get(fence);
+ drm_sched_fence_set_parent(s_fence, fence);
/* Drop for original kref_init of the fence */
dma_fence_put(fence);
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index dd283a3a4e36..e49f78a6a8cf 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -95,12 +95,12 @@ static int sun4i_drv_bind(struct device *dev)
/* drm_vblank_init calls kcalloc, which can fail */
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (ret)
- goto cleanup_mode_config;
+ goto unbind_all;
/* Remove early framebuffers (ie. simplefb) */
ret = drm_aperture_remove_framebuffers(false, &sun4i_drv_driver);
if (ret)
- goto cleanup_mode_config;
+ goto unbind_all;
sun4i_framebuffer_init(drm);
@@ -119,6 +119,8 @@ static int sun4i_drv_bind(struct device *dev)
finish_poll:
drm_kms_helper_poll_fini(drm);
+unbind_all:
+ component_unbind_all(dev, NULL);
cleanup_mode_config:
drm_mode_config_cleanup(drm);
of_reserved_mem_device_release(dev);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index d056d28f8758..bd5dae4d1624 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -295,8 +295,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
- ttm_bo_put(bo);
-
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index cd73631b6106..64a59f46f6c3 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -157,7 +157,7 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
struct ttm_buffer_object *bo = res->bo;
uint32_t num_pages;
- if (!bo)
+ if (!bo || bo->resource != res)
continue;
num_pages = PFN_UP(bo->base.size);
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index e1accfc47edf..b1a00c0c25a7 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -604,7 +604,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
if (virtio_gpu_is_shmem(bo) && use_dma_api)
- dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+ dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
bo->base.sgt, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -1025,7 +1025,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
if (virtio_gpu_is_shmem(bo) && use_dma_api)
- dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+ dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
bo->base.sgt, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 4872d183d860..aae2efeef503 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -487,7 +487,6 @@ static int host1x_get_resets(struct host1x *host)
static int host1x_probe(struct platform_device *pdev)
{
struct host1x *host;
- int syncpt_irq;
int err;
host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
@@ -517,8 +516,8 @@ static int host1x_probe(struct platform_device *pdev)
}
host->syncpt_irq = platform_get_irq(pdev, 0);
- if (syncpt_irq < 0)
- return syncpt_irq;
+ if (host->syncpt_irq < 0)
+ return host->syncpt_irq;
mutex_init(&host->devices_lock);
INIT_LIST_HEAD(&host->devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 842afc88a949..22623eb4f72f 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -256,6 +256,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
{
struct hid_report *report;
struct hid_field *field;
+ unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
unsigned int usages;
unsigned int offset;
unsigned int i;
@@ -286,8 +287,11 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
offset = report->size;
report->size += parser->global.report_size * parser->global.report_count;
+ if (parser->device->ll_driver->max_buffer_size)
+ max_buffer_size = parser->device->ll_driver->max_buffer_size;
+
/* Total size check: Allow for possible report index byte */
- if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
+ if (report->size > (max_buffer_size - 1) << 3) {
hid_err(parser->device, "report is too long\n");
return -1;
}
@@ -1963,6 +1967,7 @@ int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *
struct hid_report_enum *report_enum = hid->report_enum + type;
struct hid_report *report;
struct hid_driver *hdrv;
+ int max_buffer_size = HID_MAX_BUFFER_SIZE;
u32 rsize, csize = size;
u8 *cdata = data;
int ret = 0;
@@ -1978,10 +1983,13 @@ int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *
rsize = hid_compute_report_size(report);
- if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
- rsize = HID_MAX_BUFFER_SIZE - 1;
- else if (rsize > HID_MAX_BUFFER_SIZE)
- rsize = HID_MAX_BUFFER_SIZE;
+ if (hid->ll_driver->max_buffer_size)
+ max_buffer_size = hid->ll_driver->max_buffer_size;
+
+ if (report_enum->numbered && rsize >= max_buffer_size)
+ rsize = max_buffer_size - 1;
+ else if (rsize > max_buffer_size)
+ rsize = max_buffer_size;
if (csize < rsize) {
dbg_hid("report %d is too short, (%d < %d)\n", report->id,
@@ -2396,7 +2404,12 @@ int hid_hw_raw_request(struct hid_device *hdev,
unsigned char reportnum, __u8 *buf,
size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
{
- if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
+ unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
+
+ if (hdev->ll_driver->max_buffer_size)
+ max_buffer_size = hdev->ll_driver->max_buffer_size;
+
+ if (len < 1 || len > max_buffer_size || !buf)
return -EINVAL;
return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
@@ -2415,7 +2428,12 @@ EXPORT_SYMBOL_GPL(hid_hw_raw_request);
*/
int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
{
- if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
+ unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
+
+ if (hdev->ll_driver->max_buffer_size)
+ max_buffer_size = hdev->ll_driver->max_buffer_size;
+
+ if (len < 1 || len > max_buffer_size || !buf)
return -EINVAL;
if (hdev->ll_driver->output_report)
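The hid-core hunks above swap the hard HID_MAX_BUFFER_SIZE limit for a per-transport one: when the low-level driver declares max_buffer_size, report lengths are validated and clamped against that value, falling back to the global limit otherwise. A small stand-alone sketch of the clamping logic follows; the names and the 16384-byte default are assumptions for illustration, not the kernel definitions.

/*
 * Stand-alone sketch of the clamping added to hid_report_raw_event():
 * prefer a transport-specific limit when one is declared, otherwise fall
 * back to a global default. Names and values are assumptions.
 */
#include <stdio.h>

#define DEFAULT_MAX_BUFFER_SIZE 16384	/* assumed global fallback */

struct ll_driver {
	unsigned int max_buffer_size;	/* 0 means "no override" */
};

static unsigned int clamp_report_size(const struct ll_driver *ll,
				      unsigned int rsize, int numbered)
{
	unsigned int max = DEFAULT_MAX_BUFFER_SIZE;

	if (ll->max_buffer_size)
		max = ll->max_buffer_size;

	/* Numbered reports leave one byte of the buffer for the report ID. */
	if (numbered && rsize >= max)
		return max - 1;
	if (rsize > max)
		return max;
	return rsize;
}

int main(void)
{
	struct ll_driver small_transport = { .max_buffer_size = 4096 };
	struct ll_driver generic = { .max_buffer_size = 0 };

	printf("%u\n", clamp_report_size(&small_transport, 8000, 0));	/* 4096 */
	printf("%u\n", clamp_report_size(&generic, 8000, 1));		/* 8000 */
	return 0;
}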
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 1e16b0fa310d..27cadadda7c9 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -1354,6 +1354,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
girq->parents = NULL;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_simple_irq;
+ girq->threaded = true;
ret = gpiochip_add_data(&dev->gc, dev);
if (ret < 0) {
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 25dcda76d6c7..5fc88a063297 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -4399,6 +4399,8 @@ static const struct hid_device_id hidpp_devices[] = {
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb02a) },
{ /* MX Master 3 mouse over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023) },
+ { /* MX Master 3S mouse over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb034) },
{}
};
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index 15e14239af82..a49c6affd7c4 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -5,6 +5,7 @@
* Copyright (c) 2014-2016, Intel Corporation.
*/
+#include <linux/devm-helpers.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
@@ -621,7 +622,6 @@ static void recv_ipc(struct ishtp_device *dev, uint32_t doorbell_val)
case MNG_RESET_NOTIFY:
if (!ishtp_dev) {
ishtp_dev = dev;
- INIT_WORK(&fw_reset_work, fw_reset_work_fn);
}
schedule_work(&fw_reset_work);
break;
@@ -940,6 +940,7 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
{
struct ishtp_device *dev;
int i;
+ int ret;
dev = devm_kzalloc(&pdev->dev,
sizeof(struct ishtp_device) + sizeof(struct ish_hw),
@@ -975,6 +976,12 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
list_add_tail(&tx_buf->link, &dev->wr_free_list);
}
+ ret = devm_work_autocancel(&pdev->dev, &fw_reset_work, fw_reset_work_fn);
+ if (ret) {
+ dev_err(dev->devc, "Failed to initialise FW reset work\n");
+ return NULL;
+ }
+
dev->ops = &ish_hw_ops;
dev->devc = &pdev->dev;
dev->mtu = IPC_PAYLOAD_SIZE - sizeof(struct ishtp_msg_hdr);
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index f161c95a1ad2..4588d2cd4ea4 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -395,6 +395,7 @@ static const struct hid_ll_driver uhid_hid_driver = {
.parse = uhid_hid_parse,
.raw_request = uhid_hid_raw_request,
.output_report = uhid_hid_output_report,
+ .max_buffer_size = UHID_DATA_MAX,
};
#ifdef CONFIG_COMPAT
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 51b3d16c3223..6e4c92b500b8 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -488,10 +488,10 @@ static ssize_t temp_store(struct device *dev, struct device_attribute *attr,
val = (temp - val) / 1000;
if (sattr->index != 1) {
- data->temp[HYSTERSIS][sattr->index] &= 0xF0;
+ data->temp[HYSTERSIS][sattr->index] &= 0x0F;
data->temp[HYSTERSIS][sattr->index] |= (val & 0xF) << 4;
} else {
- data->temp[HYSTERSIS][sattr->index] &= 0x0F;
+ data->temp[HYSTERSIS][sattr->index] &= 0xF0;
data->temp[HYSTERSIS][sattr->index] |= (val & 0xF);
}
@@ -556,11 +556,11 @@ static ssize_t temp_st_show(struct device *dev, struct device_attribute *attr,
val = data->enh_acoustics[0] & 0xf;
break;
case 1:
- val = (data->enh_acoustics[1] >> 4) & 0xf;
+ val = data->enh_acoustics[1] & 0xf;
break;
case 2:
default:
- val = data->enh_acoustics[1] & 0xf;
+ val = (data->enh_acoustics[1] >> 4) & 0xf;
break;
}
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 33edb5c02f7d..d193ed3cb35e 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -757,6 +757,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
struct hwmon_device *hwdev;
const char *label;
struct device *hdev;
+ struct device *tdev = dev;
int i, err, id;
/* Complain about invalid characters in hwmon name attribute */
@@ -826,7 +827,9 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
hwdev->name = name;
hdev->class = &hwmon_class;
hdev->parent = dev;
- hdev->of_node = dev ? dev->of_node : NULL;
+ while (tdev && !tdev->of_node)
+ tdev = tdev->parent;
+ hdev->of_node = tdev ? tdev->of_node : NULL;
hwdev->chip = chip;
dev_set_drvdata(hdev, drvdata);
dev_set_name(hdev, HWMON_ID_FORMAT, id);
@@ -838,7 +841,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
INIT_LIST_HEAD(&hwdev->tzdata);
- if (dev && dev->of_node && chip && chip->ops->read &&
+ if (hdev->of_node && chip && chip->ops->read &&
chip->info[0]->type == hwmon_chip &&
(chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) {
err = hwmon_thermal_register_sensors(hdev);
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index e06186986444..f3a4c5633b1e 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -772,7 +772,7 @@ static int ina3221_probe_child_from_dt(struct device *dev,
return ret;
} else if (val > INA3221_CHANNEL3) {
dev_err(dev, "invalid reg %d of %pOFn\n", val, child);
- return ret;
+ return -EINVAL;
}
input = &ina->inputs[val];
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 66f7ceaa7c3f..e9614eb557d4 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -515,6 +515,8 @@ static const struct it87_devices it87_devices[] = {
#define has_six_temp(data) ((data)->features & FEAT_SIX_TEMP)
#define has_vin3_5v(data) ((data)->features & FEAT_VIN3_5V)
#define has_conf_noexit(data) ((data)->features & FEAT_CONF_NOEXIT)
+#define has_scaling(data) ((data)->features & (FEAT_12MV_ADC | \
+ FEAT_10_9MV_ADC))
struct it87_sio_data {
int sioaddr;
@@ -3134,7 +3136,7 @@ static int it87_probe(struct platform_device *pdev)
"Detected broken BIOS defaults, disabling PWM interface\n");
/* Starting with IT8721F, we handle scaling of internal voltages */
- if (has_12mv_adc(data)) {
+ if (has_scaling(data)) {
if (sio_data->internal & BIT(0))
data->in_scaled |= BIT(3); /* in3 is AVCC */
if (sio_data->internal & BIT(1))
diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
index 88514152d930..69341de397cb 100644
--- a/drivers/hwmon/ltc2992.c
+++ b/drivers/hwmon/ltc2992.c
@@ -323,6 +323,7 @@ static int ltc2992_config_gpio(struct ltc2992_state *st)
st->gc.label = name;
st->gc.parent = &st->client->dev;
st->gc.owner = THIS_MODULE;
+ st->gc.can_sleep = true;
st->gc.base = -1;
st->gc.names = st->gpio_names;
st->gc.ngpio = ARRAY_SIZE(st->gpio_names);
diff --git a/drivers/hwmon/peci/cputemp.c b/drivers/hwmon/peci/cputemp.c
index 30850a479f61..87d56f0fc888 100644
--- a/drivers/hwmon/peci/cputemp.c
+++ b/drivers/hwmon/peci/cputemp.c
@@ -537,6 +537,12 @@ static const struct cpu_info cpu_hsx = {
.thermal_margin_to_millidegree = &dts_eight_dot_eight_to_millidegree,
};
+static const struct cpu_info cpu_skx = {
+ .reg = &resolved_cores_reg_hsx,
+ .min_peci_revision = 0x33,
+ .thermal_margin_to_millidegree = &dts_ten_dot_six_to_millidegree,
+};
+
static const struct cpu_info cpu_icx = {
.reg = &resolved_cores_reg_icx,
.min_peci_revision = 0x40,
@@ -558,7 +564,7 @@ static const struct auxiliary_device_id peci_cputemp_ids[] = {
},
{
.name = "peci_cpu.cputemp.skx",
- .driver_data = (kernel_ulong_t)&cpu_hsx,
+ .driver_data = (kernel_ulong_t)&cpu_skx,
},
{
.name = "peci_cpu.cputemp.icx",
diff --git a/drivers/hwmon/pmbus/adm1266.c b/drivers/hwmon/pmbus/adm1266.c
index ec5f932fc6f0..1ac2b2f4c570 100644
--- a/drivers/hwmon/pmbus/adm1266.c
+++ b/drivers/hwmon/pmbus/adm1266.c
@@ -301,6 +301,7 @@ static int adm1266_config_gpio(struct adm1266_data *data)
data->gc.label = name;
data->gc.parent = &data->client->dev;
data->gc.owner = THIS_MODULE;
+ data->gc.can_sleep = true;
data->gc.base = -1;
data->gc.names = data->gpio_names;
data->gc.ngpio = ARRAY_SIZE(data->gpio_names);
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index 75fc770c9e40..3daaf2237832 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -7,6 +7,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -16,6 +17,7 @@
#include <linux/i2c.h>
#include <linux/pmbus.h>
#include <linux/gpio/driver.h>
+#include <linux/timekeeping.h>
#include "pmbus.h"
enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd90320, ucd9090,
@@ -65,6 +67,7 @@ struct ucd9000_data {
struct gpio_chip gpio;
#endif
struct dentry *debugfs;
+ ktime_t write_time;
};
#define to_ucd9000_data(_info) container_of(_info, struct ucd9000_data, info)
@@ -73,6 +76,73 @@ struct ucd9000_debugfs_entry {
u8 index;
};
+/*
+ * It has been observed that the UCD90320 randomly fails register access when
+ * doing another access right on the back of a register write. To mitigate this,
+ * make sure that there is a minimum delay between a write access and the
+ * following access. The 250us value is based on experimental data. At a delay of
+ * 200us the issue seems to go away. Add a bit of extra margin to allow for
+ * system-to-system differences.
+ */
+#define UCD90320_WAIT_DELAY_US 250
+
+static inline void ucd90320_wait(const struct ucd9000_data *data)
+{
+ s64 delta = ktime_us_delta(ktime_get(), data->write_time);
+
+ if (delta < UCD90320_WAIT_DELAY_US)
+ udelay(UCD90320_WAIT_DELAY_US - delta);
+}
+
+static int ucd90320_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct ucd9000_data *data = to_ucd9000_data(info);
+
+ if (reg >= PMBUS_VIRT_BASE)
+ return -ENXIO;
+
+ ucd90320_wait(data);
+ return pmbus_read_word_data(client, page, phase, reg);
+}
+
+static int ucd90320_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct ucd9000_data *data = to_ucd9000_data(info);
+
+ ucd90320_wait(data);
+ return pmbus_read_byte_data(client, page, reg);
+}
+
+static int ucd90320_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct ucd9000_data *data = to_ucd9000_data(info);
+ int ret;
+
+ ucd90320_wait(data);
+ ret = pmbus_write_word_data(client, page, reg, word);
+ data->write_time = ktime_get();
+
+ return ret;
+}
+
+static int ucd90320_write_byte(struct i2c_client *client, int page, u8 value)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct ucd9000_data *data = to_ucd9000_data(info);
+ int ret;
+
+ ucd90320_wait(data);
+ ret = pmbus_write_byte(client, page, value);
+ data->write_time = ktime_get();
+
+ return ret;
+}
+
static int ucd9000_get_fan_config(struct i2c_client *client, int fan)
{
int fan_config = 0;
@@ -598,6 +668,11 @@ static int ucd9000_probe(struct i2c_client *client)
info->read_byte_data = ucd9000_read_byte_data;
info->func[0] |= PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12
| PMBUS_HAVE_FAN34 | PMBUS_HAVE_STATUS_FAN34;
+ } else if (mid->driver_data == ucd90320) {
+ info->read_byte_data = ucd90320_read_byte_data;
+ info->read_word_data = ucd90320_read_word_data;
+ info->write_byte = ucd90320_write_byte;
+ info->write_word_data = ucd90320_write_word_data;
}
ucd9000_probe_gpio(client, mid, data);
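The workaround above boils down to enforcing a minimum gap between a register write and whatever access follows it. A hedged userspace sketch of the same idea, timestamping the last write and sleeping off the remainder of an assumed 250us window before the next transfer, could look like this:

/*
 * Illustrative rate-limiting helper in the spirit of ucd90320_wait().
 * The delay value and function names are assumptions for this sketch.
 */
#define _POSIX_C_SOURCE 199309L
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define WAIT_DELAY_US 250	/* assumed minimum spacing between accesses */

static int64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

static void sleep_us(int64_t us)
{
	struct timespec ts = {
		.tv_sec = us / 1000000,
		.tv_nsec = (us % 1000000) * 1000,
	};

	nanosleep(&ts, NULL);
}

struct dev_state {
	int64_t last_write_us;	/* timestamp of the last completed write */
};

static void access_wait(const struct dev_state *st)
{
	int64_t delta = now_us() - st->last_write_us;

	if (delta < WAIT_DELAY_US)
		sleep_us(WAIT_DELAY_US - delta);
}

static void bus_write(struct dev_state *st)
{
	access_wait(st);
	/* ... perform the bus write here ... */
	st->last_write_us = now_us();
}

static void bus_read(struct dev_state *st)
{
	access_wait(st);
	/* ... perform the bus read here ... */
}

int main(void)
{
	struct dev_state st = { .last_write_us = now_us() };

	bus_write(&st);
	bus_read(&st);	/* starts at least WAIT_DELAY_US after the write */
	printf("done\n");
	return 0;
}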
diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
index 47bbe47e062f..7d5f7441aceb 100644
--- a/drivers/hwmon/tmp513.c
+++ b/drivers/hwmon/tmp513.c
@@ -758,7 +758,7 @@ static int tmp51x_probe(struct i2c_client *client)
static struct i2c_driver tmp51x_driver = {
.driver = {
.name = "tmp51x",
- .of_match_table = of_match_ptr(tmp51x_of_match),
+ .of_match_table = tmp51x_of_match,
},
.probe_new = tmp51x_probe,
.id_table = tmp51x_id,
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index 5cde837bfd09..78d9f52e2a71 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -698,14 +698,14 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
ctx->comm_base_addr = pcc_chan->shmem_base_addr;
if (ctx->comm_base_addr) {
if (version == XGENE_HWMON_V2)
- ctx->pcc_comm_addr = (void __force *)ioremap(
- ctx->comm_base_addr,
- pcc_chan->shmem_size);
+ ctx->pcc_comm_addr = (void __force *)devm_ioremap(&pdev->dev,
+ ctx->comm_base_addr,
+ pcc_chan->shmem_size);
else
- ctx->pcc_comm_addr = memremap(
- ctx->comm_base_addr,
- pcc_chan->shmem_size,
- MEMREMAP_WB);
+ ctx->pcc_comm_addr = devm_memremap(&pdev->dev,
+ ctx->comm_base_addr,
+ pcc_chan->shmem_size,
+ MEMREMAP_WB);
} else {
dev_err(&pdev->dev, "Failed to get PCC comm region\n");
rc = -ENODEV;
@@ -761,6 +761,7 @@ static int xgene_hwmon_remove(struct platform_device *pdev)
{
struct xgene_hwmon_dev *ctx = platform_get_drvdata(pdev);
+ cancel_work_sync(&ctx->workq);
hwmon_device_unregister(ctx->hwmon_dev);
kfifo_free(&ctx->async_msg_fifo);
if (acpi_disabled)
diff --git a/drivers/i2c/busses/i2c-hisi.c b/drivers/i2c/busses/i2c-hisi.c
index 8c6c7075c765..e067671b3ce2 100644
--- a/drivers/i2c/busses/i2c-hisi.c
+++ b/drivers/i2c/busses/i2c-hisi.c
@@ -316,6 +316,13 @@ static void hisi_i2c_xfer_msg(struct hisi_i2c_controller *ctlr)
max_write == 0)
break;
}
+
+ /*
+ * Disable the TX_EMPTY interrupt after finishing all the messages to
+ * avoid overwhelming the CPU.
+ */
+ if (ctlr->msg_tx_idx == ctlr->msg_num)
+ hisi_i2c_disable_int(ctlr, HISI_I2C_INT_TX_EMPTY);
}
static irqreturn_t hisi_i2c_irq(int irq, void *context)
@@ -341,7 +348,11 @@ static irqreturn_t hisi_i2c_irq(int irq, void *context)
hisi_i2c_read_rx_fifo(ctlr);
out:
- if (int_stat & HISI_I2C_INT_TRANS_CPLT || ctlr->xfer_err) {
+ /*
+ * Only use TRANS_CPLT to indicate the completion. On error cases we'll
+ * get two interrupts, INT_ERR first then TRANS_CPLT.
+ */
+ if (int_stat & HISI_I2C_INT_TRANS_CPLT) {
hisi_i2c_disable_int(ctlr, HISI_I2C_INT_ALL);
hisi_i2c_clear_int(ctlr, HISI_I2C_INT_ALL);
complete(ctlr->completion);
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 188f2a36d2fd..a49b14d52a98 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -463,6 +463,8 @@ static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
if (num == 1 && msgs[0].len == 0)
goto stop;
+ lpi2c_imx->rx_buf = NULL;
+ lpi2c_imx->tx_buf = NULL;
lpi2c_imx->delivered = 0;
lpi2c_imx->msglen = msgs[i].len;
init_completion(&lpi2c_imx->complete);
@@ -503,10 +505,14 @@ disable:
static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
{
struct lpi2c_imx_struct *lpi2c_imx = dev_id;
+ unsigned int enabled;
unsigned int temp;
+ enabled = readl(lpi2c_imx->base + LPI2C_MIER);
+
lpi2c_imx_intctrl(lpi2c_imx, 0);
temp = readl(lpi2c_imx->base + LPI2C_MSR);
+ temp &= enabled;
if (temp & MSR_RDF)
lpi2c_imx_read_rxfifo(lpi2c_imx);
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index d113bed79545..e0f3b3545cfe 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -171,7 +171,7 @@ static void mxs_i2c_dma_irq_callback(void *param)
}
static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
- struct i2c_msg *msg, uint32_t flags)
+ struct i2c_msg *msg, u8 *buf, uint32_t flags)
{
struct dma_async_tx_descriptor *desc;
struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
@@ -226,7 +226,7 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
}
/* Queue the DMA data transfer. */
- sg_init_one(&i2c->sg_io[1], msg->buf, msg->len);
+ sg_init_one(&i2c->sg_io[1], buf, msg->len);
dma_map_sg(i2c->dev, &i2c->sg_io[1], 1, DMA_FROM_DEVICE);
desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[1], 1,
DMA_DEV_TO_MEM,
@@ -259,7 +259,7 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
/* Queue the DMA data transfer. */
sg_init_table(i2c->sg_io, 2);
sg_set_buf(&i2c->sg_io[0], &i2c->addr_data, 1);
- sg_set_buf(&i2c->sg_io[1], msg->buf, msg->len);
+ sg_set_buf(&i2c->sg_io[1], buf, msg->len);
dma_map_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE);
desc = dmaengine_prep_slave_sg(i2c->dmach, i2c->sg_io, 2,
DMA_MEM_TO_DEV,
@@ -563,6 +563,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
int ret;
int flags;
+ u8 *dma_buf;
int use_pio = 0;
unsigned long time_left;
@@ -588,13 +589,20 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
if (ret && (ret != -ENXIO))
mxs_i2c_reset(i2c);
} else {
+ dma_buf = i2c_get_dma_safe_msg_buf(msg, 1);
+ if (!dma_buf)
+ return -ENOMEM;
+
reinit_completion(&i2c->cmd_complete);
- ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
- if (ret)
+ ret = mxs_i2c_dma_setup_xfer(adap, msg, dma_buf, flags);
+ if (ret) {
+ i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
return ret;
+ }
time_left = wait_for_completion_timeout(&i2c->cmd_complete,
msecs_to_jiffies(1000));
+ i2c_put_dma_safe_msg_buf(dma_buf, msg, true);
if (!time_left)
goto timeout;
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 63259b3ea5ab..3538d36368a9 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -308,6 +308,9 @@ static int slimpro_i2c_blkwr(struct slimpro_i2c_dev *ctx, u32 chip,
u32 msg[3];
int rc;
+ if (writelen > I2C_SMBUS_BLOCK_MAX)
+ return -EINVAL;
+
memcpy(ctx->dma_buffer, data, writelen);
paddr = dma_map_single(ctx->dev, ctx->dma_buffer, writelen,
DMA_TO_DEVICE);
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index cb5fa971d67e..ae3af738b03f 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -561,15 +561,8 @@ static int i2c_device_probe(struct device *dev)
goto err_detach_pm_domain;
}
- /*
- * When there are no more users of probe(),
- * rename probe_new to probe.
- */
- if (driver->probe_new)
- status = driver->probe_new(client);
- else if (driver->probe)
- status = driver->probe(client,
- i2c_match_id(driver->id_table, client));
+ if (driver->probe)
+ status = driver->probe(client);
else
status = -EINVAL;
@@ -1057,7 +1050,7 @@ static int dummy_probe(struct i2c_client *client)
static struct i2c_driver dummy_driver = {
.driver.name = "dummy",
- .probe_new = dummy_probe,
+ .probe = dummy_probe,
.id_table = dummy_id,
};
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 107623c4cc14..95a0b63ac560 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -646,7 +646,7 @@ static void i2cdev_dev_release(struct device *dev)
kfree(i2c_dev);
}
-static int i2cdev_attach_adapter(struct device *dev, void *dummy)
+static int i2cdev_attach_adapter(struct device *dev)
{
struct i2c_adapter *adap;
struct i2c_dev *i2c_dev;
@@ -685,7 +685,7 @@ err_put_i2c_dev:
return NOTIFY_DONE;
}
-static int i2cdev_detach_adapter(struct device *dev, void *dummy)
+static int i2cdev_detach_adapter(struct device *dev)
{
struct i2c_adapter *adap;
struct i2c_dev *i2c_dev;
@@ -711,9 +711,9 @@ static int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action,
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
- return i2cdev_attach_adapter(dev, NULL);
+ return i2cdev_attach_adapter(dev);
case BUS_NOTIFY_DEL_DEVICE:
- return i2cdev_detach_adapter(dev, NULL);
+ return i2cdev_detach_adapter(dev);
}
return NOTIFY_DONE;
@@ -725,6 +725,18 @@ static struct notifier_block i2cdev_notifier = {
/* ------------------------------------------------------------------------- */
+static int __init i2c_dev_attach_adapter(struct device *dev, void *dummy)
+{
+ i2cdev_attach_adapter(dev);
+ return 0;
+}
+
+static int __exit i2c_dev_detach_adapter(struct device *dev, void *dummy)
+{
+ i2cdev_detach_adapter(dev);
+ return 0;
+}
+
/*
* module load/unload record keeping
*/
@@ -752,7 +764,7 @@ static int __init i2c_dev_init(void)
goto out_unreg_class;
/* Bind to already existing adapters right away */
- i2c_for_each_dev(NULL, i2cdev_attach_adapter);
+ i2c_for_each_dev(NULL, i2c_dev_attach_adapter);
return 0;
@@ -768,7 +780,7 @@ out:
static void __exit i2c_dev_exit(void)
{
bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier);
- i2c_for_each_dev(NULL, i2cdev_detach_adapter);
+ i2c_for_each_dev(NULL, i2c_dev_detach_adapter);
class_destroy(i2c_dev_class);
unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS);
}
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index 5f25f23c4ff8..5946c0d0aef9 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -207,7 +207,7 @@ static struct i2c_driver i2c_slave_eeprom_driver = {
.driver = {
.name = "i2c-slave-eeprom",
},
- .probe_new = i2c_slave_eeprom_probe,
+ .probe = i2c_slave_eeprom_probe,
.remove = i2c_slave_eeprom_remove,
.id_table = i2c_slave_eeprom_id,
};
diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c
index 75ee7ebdb614..a49642bbae4b 100644
--- a/drivers/i2c/i2c-slave-testunit.c
+++ b/drivers/i2c/i2c-slave-testunit.c
@@ -171,7 +171,7 @@ static struct i2c_driver i2c_slave_testunit_driver = {
.driver = {
.name = "i2c-slave-testunit",
},
- .probe_new = i2c_slave_testunit_probe,
+ .probe = i2c_slave_testunit_probe,
.remove = i2c_slave_testunit_remove,
.id_table = i2c_slave_testunit_id,
};
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index cd19546d31fc..138c3f5e0093 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -169,7 +169,7 @@ static struct i2c_driver smbalert_driver = {
.driver = {
.name = "smbus_alert",
},
- .probe_new = smbalert_probe,
+ .probe = smbalert_probe,
.remove = smbalert_remove,
.id_table = smbalert_ids,
};
diff --git a/drivers/i2c/muxes/i2c-mux-ltc4306.c b/drivers/i2c/muxes/i2c-mux-ltc4306.c
index 70835825083f..5a03031519be 100644
--- a/drivers/i2c/muxes/i2c-mux-ltc4306.c
+++ b/drivers/i2c/muxes/i2c-mux-ltc4306.c
@@ -306,7 +306,7 @@ static struct i2c_driver ltc4306_driver = {
.name = "ltc4306",
.of_match_table = of_match_ptr(ltc4306_of_match),
},
- .probe_new = ltc4306_probe,
+ .probe = ltc4306_probe,
.remove = ltc4306_remove,
.id_table = ltc4306_id,
};
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index 09d1d9e67e31..ce0fb69249a8 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -336,7 +336,7 @@ static struct i2c_driver pca9541_driver = {
.name = "pca9541",
.of_match_table = of_match_ptr(pca9541_of_match),
},
- .probe_new = pca9541_probe,
+ .probe = pca9541_probe,
.remove = pca9541_remove,
.id_table = pca9541_id,
};
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 3639e6d7304c..0ccee2ae5720 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -554,7 +554,7 @@ static struct i2c_driver pca954x_driver = {
.pm = &pca954x_pm,
.of_match_table = pca954x_of_match,
},
- .probe_new = pca954x_probe,
+ .probe = pca954x_probe,
.remove = pca954x_remove,
.id_table = pca954x_id,
};
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 0f392f59b135..7a24c1444ace 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -850,6 +850,10 @@ void icc_node_destroy(int id)
mutex_unlock(&icc_lock);
+ if (!node)
+ return;
+
+ kfree(node->links);
kfree(node);
}
EXPORT_SYMBOL_GPL(icc_node_destroy);
@@ -1029,54 +1033,68 @@ int icc_nodes_remove(struct icc_provider *provider)
EXPORT_SYMBOL_GPL(icc_nodes_remove);
/**
- * icc_provider_add() - add a new interconnect provider
- * @provider: the interconnect provider that will be added into topology
+ * icc_provider_init() - initialize a new interconnect provider
+ * @provider: the interconnect provider to initialize
+ *
+ * Must be called before adding nodes to the provider.
+ */
+void icc_provider_init(struct icc_provider *provider)
+{
+ WARN_ON(!provider->set);
+
+ INIT_LIST_HEAD(&provider->nodes);
+}
+EXPORT_SYMBOL_GPL(icc_provider_init);
+
+/**
+ * icc_provider_register() - register a new interconnect provider
+ * @provider: the interconnect provider to register
*
* Return: 0 on success, or an error code otherwise
*/
-int icc_provider_add(struct icc_provider *provider)
+int icc_provider_register(struct icc_provider *provider)
{
- if (WARN_ON(!provider->set))
- return -EINVAL;
if (WARN_ON(!provider->xlate && !provider->xlate_extended))
return -EINVAL;
mutex_lock(&icc_lock);
-
- INIT_LIST_HEAD(&provider->nodes);
list_add_tail(&provider->provider_list, &icc_providers);
-
mutex_unlock(&icc_lock);
- dev_dbg(provider->dev, "interconnect provider added to topology\n");
+ dev_dbg(provider->dev, "interconnect provider registered\n");
return 0;
}
-EXPORT_SYMBOL_GPL(icc_provider_add);
+EXPORT_SYMBOL_GPL(icc_provider_register);
/**
- * icc_provider_del() - delete previously added interconnect provider
- * @provider: the interconnect provider that will be removed from topology
+ * icc_provider_deregister() - deregister an interconnect provider
+ * @provider: the interconnect provider to deregister
*/
-void icc_provider_del(struct icc_provider *provider)
+void icc_provider_deregister(struct icc_provider *provider)
{
mutex_lock(&icc_lock);
- if (provider->users) {
- pr_warn("interconnect provider still has %d users\n",
- provider->users);
- mutex_unlock(&icc_lock);
- return;
- }
-
- if (!list_empty(&provider->nodes)) {
- pr_warn("interconnect provider still has nodes\n");
- mutex_unlock(&icc_lock);
- return;
- }
+ WARN_ON(provider->users);
list_del(&provider->provider_list);
mutex_unlock(&icc_lock);
}
+EXPORT_SYMBOL_GPL(icc_provider_deregister);
+
+int icc_provider_add(struct icc_provider *provider)
+{
+ icc_provider_init(provider);
+
+ return icc_provider_register(provider);
+}
+EXPORT_SYMBOL_GPL(icc_provider_add);
+
+void icc_provider_del(struct icc_provider *provider)
+{
+ WARN_ON(!list_empty(&provider->nodes));
+
+ icc_provider_deregister(provider);
+}
EXPORT_SYMBOL_GPL(icc_provider_del);
static const struct of_device_id __maybe_unused ignore_list[] = {
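The reworked kernel-doc above spells out the intended ordering: icc_provider_init() prepares the provider before any nodes are attached, icc_provider_register() publishes it only once the topology is complete, and teardown runs in reverse. A stand-alone sketch of that init/populate/register pattern follows; the structures and function names are hypothetical stand-ins, not the ICC API.

/*
 * Stand-alone illustration of the two-phase pattern the ICC rework moves
 * providers to: initialize, populate, then register; deregister before
 * tearing the nodes down. Everything here is a hypothetical stand-in.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

struct provider {
	struct node *nodes;	/* private until registered */
	int registered;		/* stands in for the global provider list */
};

static void provider_init(struct provider *p)
{
	p->nodes = NULL;
	p->registered = 0;
}

static int provider_add_node(struct provider *p, int id)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return -1;
	n->id = id;
	n->next = p->nodes;
	p->nodes = n;
	return 0;
}

static void provider_register(struct provider *p)
{
	p->registered = 1;	/* only now visible to consumers */
}

static void provider_deregister(struct provider *p)
{
	p->registered = 0;	/* unpublish first ... */
}

static void provider_remove_nodes(struct provider *p)
{
	while (p->nodes) {	/* ... then free the topology */
		struct node *n = p->nodes;

		p->nodes = n->next;
		free(n);
	}
}

int main(void)
{
	struct provider p;

	provider_init(&p);
	if (provider_add_node(&p, 1) || provider_add_node(&p, 2)) {
		provider_remove_nodes(&p);
		return 1;
	}
	provider_register(&p);

	provider_deregister(&p);
	provider_remove_nodes(&p);
	printf("ok\n");
	return 0;
}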
diff --git a/drivers/interconnect/imx/imx.c b/drivers/interconnect/imx/imx.c
index 823d9be9771a..979ed610f704 100644
--- a/drivers/interconnect/imx/imx.c
+++ b/drivers/interconnect/imx/imx.c
@@ -295,6 +295,9 @@ int imx_icc_register(struct platform_device *pdev,
provider->xlate = of_icc_xlate_onecell;
provider->data = data;
provider->dev = dev->parent;
+
+ icc_provider_init(provider);
+
platform_set_drvdata(pdev, imx_provider);
if (settings) {
@@ -306,20 +309,18 @@ int imx_icc_register(struct platform_device *pdev,
}
}
- ret = icc_provider_add(provider);
- if (ret) {
- dev_err(dev, "error adding interconnect provider: %d\n", ret);
+ ret = imx_icc_register_nodes(imx_provider, nodes, nodes_count, settings);
+ if (ret)
return ret;
- }
- ret = imx_icc_register_nodes(imx_provider, nodes, nodes_count, settings);
+ ret = icc_provider_register(provider);
if (ret)
- goto provider_del;
+ goto err_unregister_nodes;
return 0;
-provider_del:
- icc_provider_del(provider);
+err_unregister_nodes:
+ imx_icc_unregister_nodes(&imx_provider->provider);
return ret;
}
EXPORT_SYMBOL_GPL(imx_icc_register);
@@ -328,9 +329,8 @@ void imx_icc_unregister(struct platform_device *pdev)
{
struct imx_icc_provider *imx_provider = platform_get_drvdata(pdev);
+ icc_provider_deregister(&imx_provider->provider);
imx_icc_unregister_nodes(&imx_provider->provider);
-
- icc_provider_del(&imx_provider->provider);
}
EXPORT_SYMBOL_GPL(imx_icc_unregister);
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index df3196f72536..4180a06681b2 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -503,7 +503,6 @@ regmap_done:
}
provider = &qp->provider;
- INIT_LIST_HEAD(&provider->nodes);
provider->dev = dev;
provider->set = qcom_icc_set;
provider->pre_aggregate = qcom_icc_pre_bw_aggregate;
@@ -511,12 +510,7 @@ regmap_done:
provider->xlate_extended = qcom_icc_xlate_extended;
provider->data = data;
- ret = icc_provider_add(provider);
- if (ret) {
- dev_err(dev, "error adding interconnect provider: %d\n", ret);
- clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- return ret;
- }
+ icc_provider_init(provider);
for (i = 0; i < num_nodes; i++) {
size_t j;
@@ -524,7 +518,7 @@ regmap_done:
node = icc_node_create(qnodes[i]->id);
if (IS_ERR(node)) {
ret = PTR_ERR(node);
- goto err;
+ goto err_remove_nodes;
}
node->name = qnodes[i]->name;
@@ -538,17 +532,26 @@ regmap_done:
}
data->num_nodes = num_nodes;
+ ret = icc_provider_register(provider);
+ if (ret)
+ goto err_remove_nodes;
+
platform_set_drvdata(pdev, qp);
/* Populate child NoC devices if any */
- if (of_get_child_count(dev->of_node) > 0)
- return of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (of_get_child_count(dev->of_node) > 0) {
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret)
+ goto err_deregister_provider;
+ }
return 0;
-err:
+
+err_deregister_provider:
+ icc_provider_deregister(provider);
+err_remove_nodes:
icc_nodes_remove(provider);
clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- icc_provider_del(provider);
return ret;
}
@@ -558,9 +561,9 @@ int qnoc_remove(struct platform_device *pdev)
{
struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+ icc_provider_deregister(&qp->provider);
icc_nodes_remove(&qp->provider);
clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- icc_provider_del(&qp->provider);
return 0;
}
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
index fd17291c61eb..fdb5e58e408b 100644
--- a/drivers/interconnect/qcom/icc-rpmh.c
+++ b/drivers/interconnect/qcom/icc-rpmh.c
@@ -192,9 +192,10 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
provider->pre_aggregate = qcom_icc_pre_aggregate;
provider->aggregate = qcom_icc_aggregate;
provider->xlate_extended = qcom_icc_xlate_extended;
- INIT_LIST_HEAD(&provider->nodes);
provider->data = data;
+ icc_provider_init(provider);
+
qp->dev = dev;
qp->bcms = desc->bcms;
qp->num_bcms = desc->num_bcms;
@@ -203,10 +204,6 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
if (IS_ERR(qp->voter))
return PTR_ERR(qp->voter);
- ret = icc_provider_add(provider);
- if (ret)
- return ret;
-
for (i = 0; i < qp->num_bcms; i++)
qcom_icc_bcm_init(qp->bcms[i], dev);
@@ -218,7 +215,7 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
node = icc_node_create(qn->id);
if (IS_ERR(node)) {
ret = PTR_ERR(node);
- goto err;
+ goto err_remove_nodes;
}
node->name = qn->name;
@@ -232,16 +229,27 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
}
data->num_nodes = num_nodes;
+
+ ret = icc_provider_register(provider);
+ if (ret)
+ goto err_remove_nodes;
+
platform_set_drvdata(pdev, qp);
/* Populate child NoC devices if any */
- if (of_get_child_count(dev->of_node) > 0)
- return of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (of_get_child_count(dev->of_node) > 0) {
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret)
+ goto err_deregister_provider;
+ }
return 0;
-err:
+
+err_deregister_provider:
+ icc_provider_deregister(provider);
+err_remove_nodes:
icc_nodes_remove(provider);
- icc_provider_del(provider);
+
return ret;
}
EXPORT_SYMBOL_GPL(qcom_icc_rpmh_probe);
@@ -250,8 +258,8 @@ int qcom_icc_rpmh_remove(struct platform_device *pdev)
{
struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+ icc_provider_deregister(&qp->provider);
icc_nodes_remove(&qp->provider);
- icc_provider_del(&qp->provider);
return 0;
}
diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c
index 5ea192f1141d..1828deaca443 100644
--- a/drivers/interconnect/qcom/msm8974.c
+++ b/drivers/interconnect/qcom/msm8974.c
@@ -692,7 +692,6 @@ static int msm8974_icc_probe(struct platform_device *pdev)
return ret;
provider = &qp->provider;
- INIT_LIST_HEAD(&provider->nodes);
provider->dev = dev;
provider->set = msm8974_icc_set;
provider->aggregate = icc_std_aggregate;
@@ -700,11 +699,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
provider->data = data;
provider->get_bw = msm8974_get_bw;
- ret = icc_provider_add(provider);
- if (ret) {
- dev_err(dev, "error adding interconnect provider: %d\n", ret);
- goto err_disable_clks;
- }
+ icc_provider_init(provider);
for (i = 0; i < num_nodes; i++) {
size_t j;
@@ -712,7 +707,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
node = icc_node_create(qnodes[i]->id);
if (IS_ERR(node)) {
ret = PTR_ERR(node);
- goto err_del_icc;
+ goto err_remove_nodes;
}
node->name = qnodes[i]->name;
@@ -729,15 +724,16 @@ static int msm8974_icc_probe(struct platform_device *pdev)
}
data->num_nodes = num_nodes;
+ ret = icc_provider_register(provider);
+ if (ret)
+ goto err_remove_nodes;
+
platform_set_drvdata(pdev, qp);
return 0;
-err_del_icc:
+err_remove_nodes:
icc_nodes_remove(provider);
- icc_provider_del(provider);
-
-err_disable_clks:
clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
return ret;
@@ -747,9 +743,9 @@ static int msm8974_icc_remove(struct platform_device *pdev)
{
struct msm8974_icc_provider *qp = platform_get_drvdata(pdev);
+ icc_provider_deregister(&qp->provider);
icc_nodes_remove(&qp->provider);
clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- icc_provider_del(&qp->provider);
return 0;
}
diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
index 5fa171087425..1bafb54f1432 100644
--- a/drivers/interconnect/qcom/osm-l3.c
+++ b/drivers/interconnect/qcom/osm-l3.c
@@ -158,8 +158,8 @@ static int qcom_osm_l3_remove(struct platform_device *pdev)
{
struct qcom_osm_l3_icc_provider *qp = platform_get_drvdata(pdev);
+ icc_provider_deregister(&qp->provider);
icc_nodes_remove(&qp->provider);
- icc_provider_del(&qp->provider);
return 0;
}
@@ -236,7 +236,7 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
qnodes = desc->nodes;
num_nodes = desc->num_nodes;
- data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -245,14 +245,9 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
provider->set = qcom_osm_l3_set;
provider->aggregate = icc_std_aggregate;
provider->xlate = of_icc_xlate_onecell;
- INIT_LIST_HEAD(&provider->nodes);
provider->data = data;
- ret = icc_provider_add(provider);
- if (ret) {
- dev_err(&pdev->dev, "error adding interconnect provider\n");
- return ret;
- }
+ icc_provider_init(provider);
for (i = 0; i < num_nodes; i++) {
size_t j;
@@ -275,12 +270,15 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
}
data->num_nodes = num_nodes;
+ ret = icc_provider_register(provider);
+ if (ret)
+ goto err;
+
platform_set_drvdata(pdev, qp);
return 0;
err:
icc_nodes_remove(provider);
- icc_provider_del(provider);
return ret;
}
diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c
index 0da612d6398c..a29cdb4fac03 100644
--- a/drivers/interconnect/qcom/qcm2290.c
+++ b/drivers/interconnect/qcom/qcm2290.c
@@ -147,9 +147,9 @@ static struct qcom_icc_node mas_snoc_bimc_nrt = {
.name = "mas_snoc_bimc_nrt",
.buswidth = 16,
.qos.ap_owned = true,
- .qos.qos_port = 2,
+ .qos.qos_port = 3,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
- .mas_rpm_id = 163,
+ .mas_rpm_id = 164,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_snoc_bimc_nrt_links),
.links = mas_snoc_bimc_nrt_links,
diff --git a/drivers/interconnect/qcom/sm8450.c b/drivers/interconnect/qcom/sm8450.c
index e3a12e3d6e06..2d7a8e7b85ec 100644
--- a/drivers/interconnect/qcom/sm8450.c
+++ b/drivers/interconnect/qcom/sm8450.c
@@ -1844,100 +1844,6 @@ static const struct qcom_icc_desc sm8450_system_noc = {
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
-static int qnoc_probe(struct platform_device *pdev)
-{
- const struct qcom_icc_desc *desc;
- struct icc_onecell_data *data;
- struct icc_provider *provider;
- struct qcom_icc_node * const *qnodes;
- struct qcom_icc_provider *qp;
- struct icc_node *node;
- size_t num_nodes, i;
- int ret;
-
- desc = device_get_match_data(&pdev->dev);
- if (!desc)
- return -EINVAL;
-
- qnodes = desc->nodes;
- num_nodes = desc->num_nodes;
-
- qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return -ENOMEM;
-
- data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- provider = &qp->provider;
- provider->dev = &pdev->dev;
- provider->set = qcom_icc_set;
- provider->pre_aggregate = qcom_icc_pre_aggregate;
- provider->aggregate = qcom_icc_aggregate;
- provider->xlate_extended = qcom_icc_xlate_extended;
- INIT_LIST_HEAD(&provider->nodes);
- provider->data = data;
-
- qp->dev = &pdev->dev;
- qp->bcms = desc->bcms;
- qp->num_bcms = desc->num_bcms;
-
- qp->voter = of_bcm_voter_get(qp->dev, NULL);
- if (IS_ERR(qp->voter))
- return PTR_ERR(qp->voter);
-
- ret = icc_provider_add(provider);
- if (ret) {
- dev_err(&pdev->dev, "error adding interconnect provider\n");
- return ret;
- }
-
- for (i = 0; i < qp->num_bcms; i++)
- qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
-
- for (i = 0; i < num_nodes; i++) {
- size_t j;
-
- if (!qnodes[i])
- continue;
-
- node = icc_node_create(qnodes[i]->id);
- if (IS_ERR(node)) {
- ret = PTR_ERR(node);
- goto err;
- }
-
- node->name = qnodes[i]->name;
- node->data = qnodes[i];
- icc_node_add(node, provider);
-
- for (j = 0; j < qnodes[i]->num_links; j++)
- icc_link_create(node, qnodes[i]->links[j]);
-
- data->nodes[i] = node;
- }
- data->num_nodes = num_nodes;
-
- platform_set_drvdata(pdev, qp);
-
- return 0;
-err:
- icc_nodes_remove(provider);
- icc_provider_del(provider);
- return ret;
-}
-
-static int qnoc_remove(struct platform_device *pdev)
-{
- struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
-
- icc_nodes_remove(&qp->provider);
- icc_provider_del(&qp->provider);
-
- return 0;
-}
-
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sm8450-aggre1-noc",
.data = &sm8450_aggre1_noc},
@@ -1966,8 +1872,8 @@ static const struct of_device_id qnoc_of_match[] = {
MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
- .probe = qnoc_probe,
- .remove = qnoc_remove,
+ .probe = qcom_icc_rpmh_probe,
+ .remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sm8450",
.of_match_table = qnoc_of_match,
diff --git a/drivers/interconnect/qcom/sm8550.c b/drivers/interconnect/qcom/sm8550.c
index 54fa027ab961..d823ba988ef6 100644
--- a/drivers/interconnect/qcom/sm8550.c
+++ b/drivers/interconnect/qcom/sm8550.c
@@ -2165,101 +2165,6 @@ static const struct qcom_icc_desc sm8550_system_noc = {
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
-static int qnoc_probe(struct platform_device *pdev)
-{
- const struct qcom_icc_desc *desc;
- struct icc_onecell_data *data;
- struct icc_provider *provider;
- struct qcom_icc_node * const *qnodes;
- struct qcom_icc_provider *qp;
- struct icc_node *node;
- size_t num_nodes, i;
- int ret;
-
- desc = device_get_match_data(&pdev->dev);
- if (!desc)
- return -EINVAL;
-
- qnodes = desc->nodes;
- num_nodes = desc->num_nodes;
-
- qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return -ENOMEM;
-
- data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- provider = &qp->provider;
- provider->dev = &pdev->dev;
- provider->set = qcom_icc_set;
- provider->pre_aggregate = qcom_icc_pre_aggregate;
- provider->aggregate = qcom_icc_aggregate;
- provider->xlate_extended = qcom_icc_xlate_extended;
- INIT_LIST_HEAD(&provider->nodes);
- provider->data = data;
-
- qp->dev = &pdev->dev;
- qp->bcms = desc->bcms;
- qp->num_bcms = desc->num_bcms;
-
- qp->voter = of_bcm_voter_get(qp->dev, NULL);
- if (IS_ERR(qp->voter))
- return PTR_ERR(qp->voter);
-
- ret = icc_provider_add(provider);
- if (ret) {
- dev_err_probe(&pdev->dev, ret,
- "error adding interconnect provider\n");
- return ret;
- }
-
- for (i = 0; i < qp->num_bcms; i++)
- qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
-
- for (i = 0; i < num_nodes; i++) {
- size_t j;
-
- if (!qnodes[i])
- continue;
-
- node = icc_node_create(qnodes[i]->id);
- if (IS_ERR(node)) {
- ret = PTR_ERR(node);
- goto err;
- }
-
- node->name = qnodes[i]->name;
- node->data = qnodes[i];
- icc_node_add(node, provider);
-
- for (j = 0; j < qnodes[i]->num_links; j++)
- icc_link_create(node, qnodes[i]->links[j]);
-
- data->nodes[i] = node;
- }
- data->num_nodes = num_nodes;
-
- platform_set_drvdata(pdev, qp);
-
- return 0;
-err:
- icc_nodes_remove(provider);
- icc_provider_del(provider);
- return ret;
-}
-
-static int qnoc_remove(struct platform_device *pdev)
-{
- struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
-
- icc_nodes_remove(&qp->provider);
- icc_provider_del(&qp->provider);
-
- return 0;
-}
-
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sm8550-aggre1-noc",
.data = &sm8550_aggre1_noc},
@@ -2294,8 +2199,8 @@ static const struct of_device_id qnoc_of_match[] = {
MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
- .probe = qnoc_probe,
- .remove = qnoc_remove,
+ .probe = qcom_icc_rpmh_probe,
+ .remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sm8550",
.of_match_table = qnoc_of_match,
diff --git a/drivers/interconnect/samsung/exynos.c b/drivers/interconnect/samsung/exynos.c
index 6559d8cf8068..ebf09bbf725b 100644
--- a/drivers/interconnect/samsung/exynos.c
+++ b/drivers/interconnect/samsung/exynos.c
@@ -96,14 +96,9 @@ static struct icc_node *exynos_generic_icc_xlate(struct of_phandle_args *spec,
static int exynos_generic_icc_remove(struct platform_device *pdev)
{
struct exynos_icc_priv *priv = platform_get_drvdata(pdev);
- struct icc_node *parent_node, *node = priv->node;
-
- parent_node = exynos_icc_get_parent(priv->dev->parent->of_node);
- if (parent_node && !IS_ERR(parent_node))
- icc_link_destroy(node, parent_node);
+ icc_provider_deregister(&priv->provider);
icc_nodes_remove(&priv->provider);
- icc_provider_del(&priv->provider);
return 0;
}
@@ -132,15 +127,11 @@ static int exynos_generic_icc_probe(struct platform_device *pdev)
provider->inter_set = true;
provider->data = priv;
- ret = icc_provider_add(provider);
- if (ret < 0)
- return ret;
+ icc_provider_init(provider);
icc_node = icc_node_create(pdev->id);
- if (IS_ERR(icc_node)) {
- ret = PTR_ERR(icc_node);
- goto err_prov_del;
- }
+ if (IS_ERR(icc_node))
+ return PTR_ERR(icc_node);
priv->node = icc_node;
icc_node->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
@@ -149,6 +140,9 @@ static int exynos_generic_icc_probe(struct platform_device *pdev)
&priv->bus_clk_ratio))
priv->bus_clk_ratio = EXYNOS_ICC_DEFAULT_BUS_CLK_RATIO;
+ icc_node->data = priv;
+ icc_node_add(icc_node, provider);
+
/*
* Register a PM QoS request for the parent (devfreq) device.
*/
@@ -157,9 +151,6 @@ static int exynos_generic_icc_probe(struct platform_device *pdev)
if (ret < 0)
goto err_node_del;
- icc_node->data = priv;
- icc_node_add(icc_node, provider);
-
icc_parent_node = exynos_icc_get_parent(bus_dev->of_node);
if (IS_ERR(icc_parent_node)) {
ret = PTR_ERR(icc_parent_node);
@@ -171,14 +162,17 @@ static int exynos_generic_icc_probe(struct platform_device *pdev)
goto err_pmqos_del;
}
+ ret = icc_provider_register(provider);
+ if (ret < 0)
+ goto err_pmqos_del;
+
return 0;
err_pmqos_del:
dev_pm_qos_remove_request(&priv->qos_req);
err_node_del:
icc_nodes_remove(provider);
-err_prov_del:
- icc_provider_del(provider);
+
return ret;
}
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 5f1e2593fad7..b0a22e99bade 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -15,6 +15,10 @@ if MD
config BLK_DEV_MD
tristate "RAID support"
select BLOCK_HOLDER_DEPRECATED if SYSFS
+ # BLOCK_LEGACY_AUTOLOAD requirement should be removed
+ # after relevant mdadm enhancements - to make "names=yes"
+ # the default - are widely available.
+ select BLOCK_LEGACY_AUTOLOAD
help
This driver lets you combine several hard disk partitions into one
logical block device. This can be used to simply append one
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 40cb1719ae4d..3ba53dc3cc3f 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -72,7 +72,9 @@ struct dm_crypt_io {
struct crypt_config *cc;
struct bio *base_bio;
u8 *integrity_metadata;
- bool integrity_metadata_from_pool;
+ bool integrity_metadata_from_pool:1;
+ bool in_tasklet:1;
+
struct work_struct work;
struct tasklet_struct tasklet;
@@ -1730,6 +1732,7 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
io->ctx.r.req = NULL;
io->integrity_metadata = NULL;
io->integrity_metadata_from_pool = false;
+ io->in_tasklet = false;
atomic_set(&io->io_pending, 0);
}
@@ -1776,14 +1779,13 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
* our tasklet. In this case we need to delay bio_endio()
* execution to after the tasklet is done and dequeued.
*/
- if (tasklet_trylock(&io->tasklet)) {
- tasklet_unlock(&io->tasklet);
- bio_endio(base_bio);
+ if (io->in_tasklet) {
+ INIT_WORK(&io->work, kcryptd_io_bio_endio);
+ queue_work(cc->io_queue, &io->work);
return;
}
- INIT_WORK(&io->work, kcryptd_io_bio_endio);
- queue_work(cc->io_queue, &io->work);
+ bio_endio(base_bio);
}
/*
@@ -1936,6 +1938,7 @@ pop_from_list:
io = crypt_io_from_node(rb_first(&write_tree));
rb_erase(&io->rb_node, &write_tree);
kcryptd_io_write(io);
+ cond_resched();
} while (!RB_EMPTY_ROOT(&write_tree));
blk_finish_plug(&plug);
}
@@ -2230,6 +2233,7 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
* it is being executed with irqs disabled.
*/
if (in_hardirq() || irqs_disabled()) {
+ io->in_tasklet = true;
tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
tasklet_schedule(&io->tasklet);
return;
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index c21a19ab73f7..db2d997a6c18 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -188,7 +188,7 @@ static int dm_stat_in_flight(struct dm_stat_shared *shared)
atomic_read(&shared->in_flight[WRITE]);
}
-void dm_stats_init(struct dm_stats *stats)
+int dm_stats_init(struct dm_stats *stats)
{
int cpu;
struct dm_stats_last_position *last;
@@ -197,11 +197,16 @@ void dm_stats_init(struct dm_stats *stats)
INIT_LIST_HEAD(&stats->list);
stats->precise_timestamps = false;
stats->last = alloc_percpu(struct dm_stats_last_position);
+ if (!stats->last)
+ return -ENOMEM;
+
for_each_possible_cpu(cpu) {
last = per_cpu_ptr(stats->last, cpu);
last->last_sector = (sector_t)ULLONG_MAX;
last->last_rw = UINT_MAX;
}
+
+ return 0;
}
void dm_stats_cleanup(struct dm_stats *stats)
diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h
index 0bc152c8e4f3..c6728c8b4159 100644
--- a/drivers/md/dm-stats.h
+++ b/drivers/md/dm-stats.h
@@ -21,7 +21,7 @@ struct dm_stats_aux {
unsigned long long duration_ns;
};
-void dm_stats_init(struct dm_stats *st);
+int dm_stats_init(struct dm_stats *st);
void dm_stats_cleanup(struct dm_stats *st);
struct mapped_device;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 6cd105c1cef3..13d4677baafd 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3369,6 +3369,7 @@ static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv)
pt->low_water_blocks = low_water_blocks;
pt->adjusted_pf = pt->requested_pf = pf;
ti->num_flush_bios = 1;
+ ti->limit_swap_bios = true;
/*
* Only need to enable discards if the pool should pass
@@ -4249,6 +4250,7 @@ static int thin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
ti->num_flush_bios = 1;
+ ti->limit_swap_bios = true;
ti->flush_supported = true;
ti->accounts_remapped_io = true;
ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index eace45a18d45..2d0f934ba6e6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -512,10 +512,10 @@ static void dm_io_acct(struct dm_io *io, bool end)
sectors = io->sectors;
if (!end)
- bdev_start_io_acct(bio->bi_bdev, sectors, bio_op(bio),
- start_time);
+ bdev_start_io_acct(bio->bi_bdev, bio_op(bio), start_time);
else
- bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start_time);
+ bdev_end_io_acct(bio->bi_bdev, bio_op(bio), sectors,
+ start_time);
if (static_branch_unlikely(&stats_enabled) &&
unlikely(dm_stats_used(&md->stats))) {
@@ -2097,7 +2097,9 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->pending_io)
goto bad;
- dm_stats_init(&md->stats);
+ r = dm_stats_init(&md->stats);
+ if (r < 0)
+ goto bad;
/* Populate the mapping, nobody knows we exist yet */
spin_lock(&_minor_lock);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 927a43db5dfb..39e49e5d7182 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3128,6 +3128,9 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
err = kstrtouint(buf, 10, (unsigned int *)&slot);
if (err < 0)
return err;
+ if (slot < 0)
+ /* overflow */
+ return -ENOSPC;
}
if (rdev->mddev->pers && slot == -1) {
/* Setting 'slot' on an active array requires also
@@ -6256,6 +6259,11 @@ static void __md_stop(struct mddev *mddev)
mddev->to_remove = &md_redundancy_group;
module_put(pers->owner);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+
+ percpu_ref_exit(&mddev->writes_pending);
+ percpu_ref_exit(&mddev->active_io);
+ bioset_exit(&mddev->bio_set);
+ bioset_exit(&mddev->sync_set);
}
void md_stop(struct mddev *mddev)
@@ -6265,10 +6273,6 @@ void md_stop(struct mddev *mddev)
*/
__md_stop_writes(mddev);
__md_stop(mddev);
- percpu_ref_exit(&mddev->writes_pending);
- percpu_ref_exit(&mddev->active_io);
- bioset_exit(&mddev->bio_set);
- bioset_exit(&mddev->sync_set);
}
EXPORT_SYMBOL_GPL(md_stop);
@@ -7839,11 +7843,6 @@ static void md_free_disk(struct gendisk *disk)
{
struct mddev *mddev = disk->private_data;
- percpu_ref_exit(&mddev->writes_pending);
- percpu_ref_exit(&mddev->active_io);
- bioset_exit(&mddev->bio_set);
- bioset_exit(&mddev->sync_set);
-
mddev_free(mddev);
}
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index 2b01873ba0db..5c2336f318d9 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -488,7 +488,7 @@ static enum m5mols_restype __find_restype(u32 code)
do {
if (code == m5mols_default_ffmt[type].code)
return type;
- } while (type++ != SIZE_DEFAULT_FFMT);
+ } while (++type != SIZE_DEFAULT_FFMT);
return 0;
}
diff --git a/drivers/media/i2c/ov2685.c b/drivers/media/i2c/ov2685.c
index a3b524f15d89..1c80b121e7d6 100644
--- a/drivers/media/i2c/ov2685.c
+++ b/drivers/media/i2c/ov2685.c
@@ -707,8 +707,7 @@ static int ov2685_configure_regulators(struct ov2685 *ov2685)
ov2685->supplies);
}
-static int ov2685_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ov2685_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ov2685 *ov2685;
@@ -830,7 +829,7 @@ static struct i2c_driver ov2685_i2c_driver = {
.pm = &ov2685_pm_ops,
.of_match_table = of_match_ptr(ov2685_of_match),
},
- .probe = &ov2685_probe,
+ .probe_new = &ov2685_probe,
.remove = &ov2685_remove,
};
diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c
index 61906fc54e37..b287c28920a6 100644
--- a/drivers/media/i2c/ov5695.c
+++ b/drivers/media/i2c/ov5695.c
@@ -1267,8 +1267,7 @@ static int ov5695_configure_regulators(struct ov5695 *ov5695)
ov5695->supplies);
}
-static int ov5695_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ov5695_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ov5695 *ov5695;
@@ -1393,7 +1392,7 @@ static struct i2c_driver ov5695_i2c_driver = {
.pm = &ov5695_pm_ops,
.of_match_table = of_match_ptr(ov5695_of_match),
},
- .probe = &ov5695_probe,
+ .probe_new = &ov5695_probe,
.remove = &ov5695_remove,
};
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index 592907546ee6..5cd28619ea9f 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -794,16 +794,12 @@ static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
mc->provider.aggregate = mc->soc->icc_ops->aggregate;
mc->provider.xlate_extended = mc->soc->icc_ops->xlate_extended;
- err = icc_provider_add(&mc->provider);
- if (err)
- return err;
+ icc_provider_init(&mc->provider);
/* create Memory Controller node */
node = icc_node_create(TEGRA_ICC_MC);
- if (IS_ERR(node)) {
- err = PTR_ERR(node);
- goto del_provider;
- }
+ if (IS_ERR(node))
+ return PTR_ERR(node);
node->name = "Memory Controller";
icc_node_add(node, &mc->provider);
@@ -830,12 +826,14 @@ static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
goto remove_nodes;
}
+ err = icc_provider_register(&mc->provider);
+ if (err)
+ goto remove_nodes;
+
return 0;
remove_nodes:
icc_nodes_remove(&mc->provider);
-del_provider:
- icc_provider_del(&mc->provider);
return err;
}
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 85bc936c02f9..00ed2b6a0d1b 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -1351,15 +1351,13 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
emc->provider.aggregate = soc->icc_ops->aggregate;
emc->provider.xlate_extended = emc_of_icc_xlate_extended;
- err = icc_provider_add(&emc->provider);
- if (err)
- goto err_msg;
+ icc_provider_init(&emc->provider);
/* create External Memory Controller node */
node = icc_node_create(TEGRA_ICC_EMC);
if (IS_ERR(node)) {
err = PTR_ERR(node);
- goto del_provider;
+ goto err_msg;
}
node->name = "External Memory Controller";
@@ -1380,12 +1378,14 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
node->name = "External Memory (DRAM)";
icc_node_add(node, &emc->provider);
+ err = icc_provider_register(&emc->provider);
+ if (err)
+ goto remove_nodes;
+
return 0;
remove_nodes:
icc_nodes_remove(&emc->provider);
-del_provider:
- icc_provider_del(&emc->provider);
err_msg:
dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index bd4e37b6552d..fd595c851a27 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -1021,15 +1021,13 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
emc->provider.aggregate = soc->icc_ops->aggregate;
emc->provider.xlate_extended = emc_of_icc_xlate_extended;
- err = icc_provider_add(&emc->provider);
- if (err)
- goto err_msg;
+ icc_provider_init(&emc->provider);
/* create External Memory Controller node */
node = icc_node_create(TEGRA_ICC_EMC);
if (IS_ERR(node)) {
err = PTR_ERR(node);
- goto del_provider;
+ goto err_msg;
}
node->name = "External Memory Controller";
@@ -1050,12 +1048,14 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
node->name = "External Memory (DRAM)";
icc_node_add(node, &emc->provider);
+ err = icc_provider_register(&emc->provider);
+ if (err)
+ goto remove_nodes;
+
return 0;
remove_nodes:
icc_nodes_remove(&emc->provider);
-del_provider:
- icc_provider_del(&emc->provider);
err_msg:
dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
index 77706e9bc543..c91e9b7e2e01 100644
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -1533,15 +1533,13 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
emc->provider.aggregate = soc->icc_ops->aggregate;
emc->provider.xlate_extended = emc_of_icc_xlate_extended;
- err = icc_provider_add(&emc->provider);
- if (err)
- goto err_msg;
+ icc_provider_init(&emc->provider);
/* create External Memory Controller node */
node = icc_node_create(TEGRA_ICC_EMC);
if (IS_ERR(node)) {
err = PTR_ERR(node);
- goto del_provider;
+ goto err_msg;
}
node->name = "External Memory Controller";
@@ -1562,12 +1560,14 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
node->name = "External Memory (DRAM)";
icc_node_add(node, &emc->provider);
+ err = icc_provider_register(&emc->provider);
+ if (err)
+ goto remove_nodes;
+
return 0;
remove_nodes:
icc_nodes_remove(&emc->provider);
-del_provider:
- icc_provider_del(&emc->provider);
err_msg:
dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
index 28ffb4377d98..3856d5c04c5f 100644
--- a/drivers/misc/ad525x_dpot-i2c.c
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -50,9 +50,9 @@ static const struct ad_dpot_bus_ops bops = {
.write_r8d16 = write_r8d16,
};
-static int ad_dpot_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ad_dpot_i2c_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct ad_dpot_bus_data bdata = {
.client = client,
.bops = &bops,
@@ -106,7 +106,7 @@ static struct i2c_driver ad_dpot_i2c_driver = {
.driver = {
.name = "ad_dpot",
},
- .probe = ad_dpot_i2c_probe,
+ .probe_new = ad_dpot_i2c_probe,
.remove = ad_dpot_i2c_remove,
.id_table = ad_dpot_id,
};
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 9ddb854b8155..5c19097266fe 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -1343,7 +1343,9 @@ static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag)
struct mei_cl_vtag *vtag_l;
list_for_each_entry(vtag_l, &cl->vtag_map, list) {
- if (vtag_l->vtag == vtag) {
+ /* The client on the bus has one fixed vtag map */
+ if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
+ vtag_l->vtag == vtag) {
vtag_l->pending_read = false;
break;
}
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index e0dcd5c114db..7728fe685476 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -23,7 +23,7 @@
#include <linux/component.h>
#include <drm/drm_connector.h>
#include <drm/i915_component.h>
-#include <drm/i915_mei_hdcp_interface.h>
+#include <drm/i915_hdcp_interface.h>
#include "mei_hdcp.h"
@@ -52,13 +52,13 @@ mei_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data,
session_init_in.header.api_version = HDCP_API_VERSION;
session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION;
- session_init_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ session_init_in.header.status = FW_HDCP_STATUS_SUCCESS;
session_init_in.header.buffer_len =
WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN;
session_init_in.port.integrated_port_type = data->port_type;
- session_init_in.port.physical_port = (u8)data->fw_ddi;
- session_init_in.port.attached_transcoder = (u8)data->fw_tc;
+ session_init_in.port.physical_port = (u8)data->hdcp_ddi;
+ session_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
session_init_in.protocol = data->protocol;
byte = mei_cldev_send(cldev, (u8 *)&session_init_in,
@@ -75,7 +75,7 @@ mei_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data,
return byte;
}
- if (session_init_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ if (session_init_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n",
WIRED_INITIATE_HDCP2_SESSION,
session_init_out.header.status);
@@ -122,13 +122,13 @@ mei_hdcp_verify_receiver_cert_prepare_km(struct device *dev,
verify_rxcert_in.header.api_version = HDCP_API_VERSION;
verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT;
- verify_rxcert_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ verify_rxcert_in.header.status = FW_HDCP_STATUS_SUCCESS;
verify_rxcert_in.header.buffer_len =
WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN;
verify_rxcert_in.port.integrated_port_type = data->port_type;
- verify_rxcert_in.port.physical_port = (u8)data->fw_ddi;
- verify_rxcert_in.port.attached_transcoder = (u8)data->fw_tc;
+ verify_rxcert_in.port.physical_port = (u8)data->hdcp_ddi;
+ verify_rxcert_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
verify_rxcert_in.cert_rx = rx_cert->cert_rx;
memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN);
@@ -148,7 +148,7 @@ mei_hdcp_verify_receiver_cert_prepare_km(struct device *dev,
return byte;
}
- if (verify_rxcert_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ if (verify_rxcert_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n",
WIRED_VERIFY_RECEIVER_CERT,
verify_rxcert_out.header.status);
@@ -194,12 +194,12 @@ mei_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data,
send_hprime_in.header.api_version = HDCP_API_VERSION;
send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME;
- send_hprime_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ send_hprime_in.header.status = FW_HDCP_STATUS_SUCCESS;
send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN;
send_hprime_in.port.integrated_port_type = data->port_type;
- send_hprime_in.port.physical_port = (u8)data->fw_ddi;
- send_hprime_in.port.attached_transcoder = (u8)data->fw_tc;
+ send_hprime_in.port.physical_port = (u8)data->hdcp_ddi;
+ send_hprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
memcpy(send_hprime_in.h_prime, rx_hprime->h_prime,
HDCP_2_2_H_PRIME_LEN);
@@ -218,7 +218,7 @@ mei_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data,
return byte;
}
- if (send_hprime_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ if (send_hprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n",
WIRED_AKE_SEND_HPRIME, send_hprime_out.header.status);
return -EIO;
@@ -251,13 +251,13 @@ mei_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
pairing_info_in.header.api_version = HDCP_API_VERSION;
pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO;
- pairing_info_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ pairing_info_in.header.status = FW_HDCP_STATUS_SUCCESS;
pairing_info_in.header.buffer_len =
WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN;
pairing_info_in.port.integrated_port_type = data->port_type;
- pairing_info_in.port.physical_port = (u8)data->fw_ddi;
- pairing_info_in.port.attached_transcoder = (u8)data->fw_tc;
+ pairing_info_in.port.physical_port = (u8)data->hdcp_ddi;
+ pairing_info_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km,
HDCP_2_2_E_KH_KM_LEN);
@@ -276,7 +276,7 @@ mei_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
return byte;
}
- if (pairing_info_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ if (pairing_info_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X failed. Status: 0x%X\n",
WIRED_AKE_SEND_PAIRING_INFO,
pairing_info_out.header.status);
@@ -311,12 +311,12 @@ mei_hdcp_initiate_locality_check(struct device *dev,
lc_init_in.header.api_version = HDCP_API_VERSION;
lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK;
- lc_init_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ lc_init_in.header.status = FW_HDCP_STATUS_SUCCESS;
lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN;
lc_init_in.port.integrated_port_type = data->port_type;
- lc_init_in.port.physical_port = (u8)data->fw_ddi;
- lc_init_in.port.attached_transcoder = (u8)data->fw_tc;
+ lc_init_in.port.physical_port = (u8)data->hdcp_ddi;
+ lc_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
byte = mei_cldev_send(cldev, (u8 *)&lc_init_in, sizeof(lc_init_in));
if (byte < 0) {
@@ -330,7 +330,7 @@ mei_hdcp_initiate_locality_check(struct device *dev,
return byte;
}
- if (lc_init_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ if (lc_init_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X Failed. status: 0x%X\n",
WIRED_INIT_LOCALITY_CHECK, lc_init_out.header.status);
return -EIO;
@@ -366,13 +366,13 @@ mei_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data,
verify_lprime_in.header.api_version = HDCP_API_VERSION;
verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY;
- verify_lprime_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ verify_lprime_in.header.status = FW_HDCP_STATUS_SUCCESS;
verify_lprime_in.header.buffer_len =
WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN;
verify_lprime_in.port.integrated_port_type = data->port_type;
- verify_lprime_in.port.physical_port = (u8)data->fw_ddi;
- verify_lprime_in.port.attached_transcoder = (u8)data->fw_tc;
+ verify_lprime_in.port.physical_port = (u8)data->hdcp_ddi;
+ verify_lprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime,
HDCP_2_2_L_PRIME_LEN);
@@ -391,7 +391,7 @@ mei_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data,
return byte;
}
- if (verify_lprime_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ if (verify_lprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
WIRED_VALIDATE_LOCALITY,
verify_lprime_out.header.status);
@@ -425,12 +425,12 @@ static int mei_hdcp_get_session_key(struct device *dev,
get_skey_in.header.api_version = HDCP_API_VERSION;
get_skey_in.header.command_id = WIRED_GET_SESSION_KEY;
- get_skey_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ get_skey_in.header.status = FW_HDCP_STATUS_SUCCESS;
get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN;
get_skey_in.port.integrated_port_type = data->port_type;
- get_skey_in.port.physical_port = (u8)data->fw_ddi;
- get_skey_in.port.attached_transcoder = (u8)data->fw_tc;
+ get_skey_in.port.physical_port = (u8)data->hdcp_ddi;
+ get_skey_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
byte = mei_cldev_send(cldev, (u8 *)&get_skey_in, sizeof(get_skey_in));
if (byte < 0) {
@@ -445,7 +445,7 @@ static int mei_hdcp_get_session_key(struct device *dev,
return byte;
}
- if (get_skey_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ if (get_skey_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
WIRED_GET_SESSION_KEY, get_skey_out.header.status);
return -EIO;
@@ -489,13 +489,13 @@ mei_hdcp_repeater_check_flow_prepare_ack(struct device *dev,
verify_repeater_in.header.api_version = HDCP_API_VERSION;
verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER;
- verify_repeater_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ verify_repeater_in.header.status = FW_HDCP_STATUS_SUCCESS;
verify_repeater_in.header.buffer_len =
WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN;
verify_repeater_in.port.integrated_port_type = data->port_type;
- verify_repeater_in.port.physical_port = (u8)data->fw_ddi;
- verify_repeater_in.port.attached_transcoder = (u8)data->fw_tc;
+ verify_repeater_in.port.physical_port = (u8)data->hdcp_ddi;
+ verify_repeater_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
memcpy(verify_repeater_in.rx_info, rep_topology->rx_info,
HDCP_2_2_RXINFO_LEN);
@@ -520,7 +520,7 @@ mei_hdcp_repeater_check_flow_prepare_ack(struct device *dev,
return byte;
}
- if (verify_repeater_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ if (verify_repeater_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
WIRED_VERIFY_REPEATER,
verify_repeater_out.header.status);
@@ -568,12 +568,12 @@ static int mei_hdcp_verify_mprime(struct device *dev,
verify_mprime_in->header.api_version = HDCP_API_VERSION;
verify_mprime_in->header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ;
- verify_mprime_in->header.status = ME_HDCP_STATUS_SUCCESS;
+ verify_mprime_in->header.status = FW_HDCP_STATUS_SUCCESS;
verify_mprime_in->header.buffer_len = cmd_size - sizeof(verify_mprime_in->header);
verify_mprime_in->port.integrated_port_type = data->port_type;
- verify_mprime_in->port.physical_port = (u8)data->fw_ddi;
- verify_mprime_in->port.attached_transcoder = (u8)data->fw_tc;
+ verify_mprime_in->port.physical_port = (u8)data->hdcp_ddi;
+ verify_mprime_in->port.attached_transcoder = (u8)data->hdcp_transcoder;
memcpy(verify_mprime_in->m_prime, stream_ready->m_prime, HDCP_2_2_MPRIME_LEN);
drm_hdcp_cpu_to_be24(verify_mprime_in->seq_num_m, data->seq_num_m);
@@ -597,7 +597,7 @@ static int mei_hdcp_verify_mprime(struct device *dev,
return byte;
}
- if (verify_mprime_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ if (verify_mprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
WIRED_REPEATER_AUTH_STREAM_REQ,
verify_mprime_out.header.status);
@@ -630,12 +630,12 @@ static int mei_hdcp_enable_authentication(struct device *dev,
enable_auth_in.header.api_version = HDCP_API_VERSION;
enable_auth_in.header.command_id = WIRED_ENABLE_AUTH;
- enable_auth_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ enable_auth_in.header.status = FW_HDCP_STATUS_SUCCESS;
enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN;
enable_auth_in.port.integrated_port_type = data->port_type;
- enable_auth_in.port.physical_port = (u8)data->fw_ddi;
- enable_auth_in.port.attached_transcoder = (u8)data->fw_tc;
+ enable_auth_in.port.physical_port = (u8)data->hdcp_ddi;
+ enable_auth_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
enable_auth_in.stream_type = data->streams[0].stream_type;
byte = mei_cldev_send(cldev, (u8 *)&enable_auth_in,
@@ -652,7 +652,7 @@ static int mei_hdcp_enable_authentication(struct device *dev,
return byte;
}
- if (enable_auth_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ if (enable_auth_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
WIRED_ENABLE_AUTH, enable_auth_out.header.status);
return -EIO;
@@ -684,13 +684,13 @@ mei_hdcp_close_session(struct device *dev, struct hdcp_port_data *data)
session_close_in.header.api_version = HDCP_API_VERSION;
session_close_in.header.command_id = WIRED_CLOSE_SESSION;
- session_close_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ session_close_in.header.status = FW_HDCP_STATUS_SUCCESS;
session_close_in.header.buffer_len =
WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN;
session_close_in.port.integrated_port_type = data->port_type;
- session_close_in.port.physical_port = (u8)data->fw_ddi;
- session_close_in.port.attached_transcoder = (u8)data->fw_tc;
+ session_close_in.port.physical_port = (u8)data->hdcp_ddi;
+ session_close_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
byte = mei_cldev_send(cldev, (u8 *)&session_close_in,
sizeof(session_close_in));
@@ -706,7 +706,7 @@ mei_hdcp_close_session(struct device *dev, struct hdcp_port_data *data)
return byte;
}
- if (session_close_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ if (session_close_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "Session Close Failed. status: 0x%X\n",
session_close_out.header.status);
return -EIO;
@@ -715,7 +715,7 @@ mei_hdcp_close_session(struct device *dev, struct hdcp_port_data *data)
return 0;
}
-static const struct i915_hdcp_component_ops mei_hdcp_ops = {
+static const struct i915_hdcp_ops mei_hdcp_ops = {
.owner = THIS_MODULE,
.initiate_hdcp2_session = mei_hdcp_initiate_session,
.verify_receiver_cert_prepare_km =
@@ -735,13 +735,12 @@ static const struct i915_hdcp_component_ops mei_hdcp_ops = {
static int mei_component_master_bind(struct device *dev)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
- struct i915_hdcp_comp_master *comp_master =
- mei_cldev_get_drvdata(cldev);
+ struct i915_hdcp_master *comp_master = mei_cldev_get_drvdata(cldev);
int ret;
dev_dbg(dev, "%s\n", __func__);
comp_master->ops = &mei_hdcp_ops;
- comp_master->mei_dev = dev;
+ comp_master->hdcp_dev = dev;
ret = component_bind_all(dev, comp_master);
if (ret < 0)
return ret;
@@ -752,8 +751,7 @@ static int mei_component_master_bind(struct device *dev)
static void mei_component_master_unbind(struct device *dev)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
- struct i915_hdcp_comp_master *comp_master =
- mei_cldev_get_drvdata(cldev);
+ struct i915_hdcp_master *comp_master = mei_cldev_get_drvdata(cldev);
dev_dbg(dev, "%s\n", __func__);
component_unbind_all(dev, comp_master);
@@ -801,7 +799,7 @@ static int mei_hdcp_component_match(struct device *dev, int subcomponent,
static int mei_hdcp_probe(struct mei_cl_device *cldev,
const struct mei_cl_device_id *id)
{
- struct i915_hdcp_comp_master *comp_master;
+ struct i915_hdcp_master *comp_master;
struct component_match *master_match;
int ret;
@@ -846,8 +844,7 @@ enable_err_exit:
static void mei_hdcp_remove(struct mei_cl_device *cldev)
{
- struct i915_hdcp_comp_master *comp_master =
- mei_cldev_get_drvdata(cldev);
+ struct i915_hdcp_master *comp_master = mei_cldev_get_drvdata(cldev);
int ret;
component_master_del(&cldev->dev, &mei_component_master_ops);
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.h b/drivers/misc/mei/hdcp/mei_hdcp.h
index ca09c8f83d6b..0683d83ec17a 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.h
+++ b/drivers/misc/mei/hdcp/mei_hdcp.h
@@ -11,358 +11,4 @@
#include <drm/display/drm_hdcp.h>
-/* me_hdcp_status: Enumeration of all HDCP Status Codes */
-enum me_hdcp_status {
- ME_HDCP_STATUS_SUCCESS = 0x0000,
-
- /* WiDi Generic Status Codes */
- ME_HDCP_STATUS_INTERNAL_ERROR = 0x1000,
- ME_HDCP_STATUS_UNKNOWN_ERROR = 0x1001,
- ME_HDCP_STATUS_INCORRECT_API_VERSION = 0x1002,
- ME_HDCP_STATUS_INVALID_FUNCTION = 0x1003,
- ME_HDCP_STATUS_INVALID_BUFFER_LENGTH = 0x1004,
- ME_HDCP_STATUS_INVALID_PARAMS = 0x1005,
- ME_HDCP_STATUS_AUTHENTICATION_FAILED = 0x1006,
-
- /* WiDi Status Codes */
- ME_HDCP_INVALID_SESSION_STATE = 0x6000,
- ME_HDCP_SRM_FRAGMENT_UNEXPECTED = 0x6001,
- ME_HDCP_SRM_INVALID_LENGTH = 0x6002,
- ME_HDCP_SRM_FRAGMENT_OFFSET_INVALID = 0x6003,
- ME_HDCP_SRM_VERIFICATION_FAILED = 0x6004,
- ME_HDCP_SRM_VERSION_TOO_OLD = 0x6005,
- ME_HDCP_RX_CERT_VERIFICATION_FAILED = 0x6006,
- ME_HDCP_RX_REVOKED = 0x6007,
- ME_HDCP_H_VERIFICATION_FAILED = 0x6008,
- ME_HDCP_REPEATER_CHECK_UNEXPECTED = 0x6009,
- ME_HDCP_TOPOLOGY_MAX_EXCEEDED = 0x600A,
- ME_HDCP_V_VERIFICATION_FAILED = 0x600B,
- ME_HDCP_L_VERIFICATION_FAILED = 0x600C,
- ME_HDCP_STREAM_KEY_ALLOC_FAILED = 0x600D,
- ME_HDCP_BASE_KEY_RESET_FAILED = 0x600E,
- ME_HDCP_NONCE_GENERATION_FAILED = 0x600F,
- ME_HDCP_STATUS_INVALID_E_KEY_STATE = 0x6010,
- ME_HDCP_STATUS_INVALID_CS_ICV = 0x6011,
- ME_HDCP_STATUS_INVALID_KB_KEY_STATE = 0x6012,
- ME_HDCP_STATUS_INVALID_PAVP_MODE_ICV = 0x6013,
- ME_HDCP_STATUS_INVALID_PAVP_MODE = 0x6014,
- ME_HDCP_STATUS_LC_MAX_ATTEMPTS = 0x6015,
-
- /* New status for HDCP 2.1 */
- ME_HDCP_STATUS_MISMATCH_IN_M = 0x6016,
-
- /* New status code for HDCP 2.2 Rx */
- ME_HDCP_STATUS_RX_PROV_NOT_ALLOWED = 0x6017,
- ME_HDCP_STATUS_RX_PROV_WRONG_SUBJECT = 0x6018,
- ME_HDCP_RX_NEEDS_PROVISIONING = 0x6019,
- ME_HDCP_BKSV_ICV_AUTH_FAILED = 0x6020,
- ME_HDCP_STATUS_INVALID_STREAM_ID = 0x6021,
- ME_HDCP_STATUS_CHAIN_NOT_INITIALIZED = 0x6022,
- ME_HDCP_FAIL_NOT_EXPECTED = 0x6023,
- ME_HDCP_FAIL_HDCP_OFF = 0x6024,
- ME_HDCP_FAIL_INVALID_PAVP_MEMORY_MODE = 0x6025,
- ME_HDCP_FAIL_AES_ECB_FAILURE = 0x6026,
- ME_HDCP_FEATURE_NOT_SUPPORTED = 0x6027,
- ME_HDCP_DMA_READ_ERROR = 0x6028,
- ME_HDCP_DMA_WRITE_ERROR = 0x6029,
- ME_HDCP_FAIL_INVALID_PACKET_SIZE = 0x6030,
- ME_HDCP_H264_PARSING_ERROR = 0x6031,
- ME_HDCP_HDCP2_ERRATA_VIDEO_VIOLATION = 0x6032,
- ME_HDCP_HDCP2_ERRATA_AUDIO_VIOLATION = 0x6033,
- ME_HDCP_TX_ACTIVE_ERROR = 0x6034,
- ME_HDCP_MODE_CHANGE_ERROR = 0x6035,
- ME_HDCP_STREAM_TYPE_ERROR = 0x6036,
- ME_HDCP_STREAM_MANAGE_NOT_POSSIBLE = 0x6037,
-
- ME_HDCP_STATUS_PORT_INVALID_COMMAND = 0x6038,
- ME_HDCP_STATUS_UNSUPPORTED_PROTOCOL = 0x6039,
- ME_HDCP_STATUS_INVALID_PORT_INDEX = 0x603a,
- ME_HDCP_STATUS_TX_AUTH_NEEDED = 0x603b,
- ME_HDCP_STATUS_NOT_INTEGRATED_PORT = 0x603c,
- ME_HDCP_STATUS_SESSION_MAX_REACHED = 0x603d,
-
- /* hdcp capable bit is not set in rx_caps(error is unique to DP) */
- ME_HDCP_STATUS_NOT_HDCP_CAPABLE = 0x6041,
-
- ME_HDCP_STATUS_INVALID_STREAM_COUNT = 0x6042,
-};
-
-#define HDCP_API_VERSION 0x00010000
-
-#define HDCP_M_LEN 16
-#define HDCP_KH_LEN 16
-
-/* Payload Buffer size(Excluding Header) for CMDs and corresponding response */
-/* Wired_Tx_AKE */
-#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN (4 + 1)
-#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_OUT (4 + 8 + 3)
-
-#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN (4 + 522 + 8 + 3)
-#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MIN_OUT (4 + 1 + 3 + 16 + 16)
-#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MAX_OUT (4 + 1 + 3 + 128)
-
-#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN (4 + 32)
-#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_OUT (4)
-
-#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN (4 + 16)
-#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_OUT (4)
-
-#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN (4)
-#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_OUT (4)
-
-/* Wired_Tx_LC */
-#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN (4)
-#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_OUT (4 + 8)
-
-#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN (4 + 32)
-#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_OUT (4)
-
-/* Wired_Tx_SKE */
-#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN (4)
-#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_OUT (4 + 16 + 8)
-
-/* Wired_Tx_SKE */
-#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN (4 + 1)
-#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_OUT (4)
-
-/* Wired_Tx_Repeater */
-#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN (4 + 2 + 3 + 16 + 155)
-#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_OUT (4 + 1 + 16)
-
-#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN (4 + 3 + \
- 32 + 2 + 2)
-
-#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_OUT (4)
-
-/* hdcp_command_id: Enumeration of all WIRED HDCP Command IDs */
-enum hdcp_command_id {
- _WIDI_COMMAND_BASE = 0x00030000,
- WIDI_INITIATE_HDCP2_SESSION = _WIDI_COMMAND_BASE,
- HDCP_GET_SRM_STATUS,
- HDCP_SEND_SRM_FRAGMENT,
-
- /* The wired HDCP Tx commands */
- _WIRED_COMMAND_BASE = 0x00031000,
- WIRED_INITIATE_HDCP2_SESSION = _WIRED_COMMAND_BASE,
- WIRED_VERIFY_RECEIVER_CERT,
- WIRED_AKE_SEND_HPRIME,
- WIRED_AKE_SEND_PAIRING_INFO,
- WIRED_INIT_LOCALITY_CHECK,
- WIRED_VALIDATE_LOCALITY,
- WIRED_GET_SESSION_KEY,
- WIRED_ENABLE_AUTH,
- WIRED_VERIFY_REPEATER,
- WIRED_REPEATER_AUTH_STREAM_REQ,
- WIRED_CLOSE_SESSION,
-
- _WIRED_COMMANDS_COUNT,
-};
-
-union encrypted_buff {
- u8 e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN];
- u8 e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN];
- struct {
- u8 e_kh_km[HDCP_KH_LEN];
- u8 m[HDCP_M_LEN];
- } __packed;
-};
-
-/* HDCP HECI message header. All header values are little endian. */
-struct hdcp_cmd_header {
- u32 api_version;
- u32 command_id;
- enum me_hdcp_status status;
- /* Length of the HECI message (excluding the header) */
- u32 buffer_len;
-} __packed;
-
-/* Empty command request or response. No data follows the header. */
-struct hdcp_cmd_no_data {
- struct hdcp_cmd_header header;
-} __packed;
-
-/* Uniquely identifies the hdcp port being addressed for a given command. */
-struct hdcp_port_id {
- u8 integrated_port_type;
- /* physical_port is used until Gen11.5. Must be zero for Gen11.5+ */
- u8 physical_port;
- /* attached_transcoder is for Gen11.5+. Set to zero for <Gen11.5 */
- u8 attached_transcoder;
- u8 reserved;
-} __packed;
-
-/*
- * Data structures for integrated wired HDCP2 Tx in
- * support of the AKE protocol
- */
-/* HECI struct for integrated wired HDCP Tx session initiation. */
-struct wired_cmd_initiate_hdcp2_session_in {
- struct hdcp_cmd_header header;
- struct hdcp_port_id port;
- u8 protocol; /* for HDMI vs DP */
-} __packed;
-
-struct wired_cmd_initiate_hdcp2_session_out {
- struct hdcp_cmd_header header;
- struct hdcp_port_id port;
- u8 r_tx[HDCP_2_2_RTX_LEN];
- struct hdcp2_tx_caps tx_caps;
-} __packed;
-
-/* HECI struct for ending an integrated wired HDCP Tx session. */
-struct wired_cmd_close_session_in {
- struct hdcp_cmd_header header;
- struct hdcp_port_id port;
-} __packed;
-
-struct wired_cmd_close_session_out {
- struct hdcp_cmd_header header;
- struct hdcp_port_id port;
-} __packed;
-
-/* HECI struct for integrated wired HDCP Tx Rx Cert verification. */
-struct wired_cmd_verify_receiver_cert_in {
- struct hdcp_cmd_header header;
- struct hdcp_port_id port;
- struct hdcp2_cert_rx cert_rx;
- u8 r_rx[HDCP_2_2_RRX_LEN];
- u8 rx_caps[HDCP_2_2_RXCAPS_LEN];
-} __packed;
-
-struct wired_cmd_verify_receiver_cert_out {
- struct hdcp_cmd_header header;
- struct hdcp_port_id port;
- u8 km_stored;
- u8 reserved[3];
- union encrypted_buff ekm_buff;
-} __packed;
-
-/* HECI struct for verification of Rx's Hprime in a HDCP Tx session */
-struct wired_cmd_ake_send_hprime_in {
- struct hdcp_cmd_header header;
- struct hdcp_port_id port;
- u8 h_prime[HDCP_2_2_H_PRIME_LEN];
-} __packed;
-
-struct wired_cmd_ake_send_hprime_out {
- struct hdcp_cmd_header header;
- struct hdcp_port_id port;
-} __packed;
-
-/*
- * HECI struct for sending in AKE pairing data generated by the Rx in an
- * integrated wired HDCP Tx session.
- */
-struct wired_cmd_ake_send_pairing_info_in {
- struct hdcp_cmd_header header;
- struct hdcp_port_id port;
- u8 e_kh_km[HDCP_2_2_E_KH_KM_LEN];
-} __packed;
-
-struct wired_cmd_ake_send_pairing_info_out {
- struct hdcp_cmd_header header;
- struct hdcp_port_id port;
-} __packed;
-
-/* Data structures for integrated wired HDCP2 Tx in support of the LC protocol*/
-/*
- * HECI struct for initiating locality check with an
- * integrated wired HDCP Tx session.
- */
-struct wired_cmd_init_locality_check_in {
- struct hdcp_cmd_header header;
- struct hdcp_port_id port;
-} __packed;
-
-struct wired_cmd_init_locality_check_out {
- struct hdcp_cmd_header header;
- struct hdcp_port_id port;
- u8 r_n[HDCP_2_2_RN_LEN];
-} __packed;
-
-/*
- * HECI struct for validating an Rx's LPrime value in an
- * integrated wired HDCP Tx session.
- */
-struct wired_cmd_validate_locality_in {
- struct hdcp_cmd_header header;
- struct hdcp_port_id port;
- u8 l_prime[HDCP_2_2_L_PRIME_LEN];
-} __packed;
-
-struct wired_cmd_validate_locality_out {
- struct hdcp_cmd_header header;
- struct hdcp_port_id port;
-} __packed;
-
-/*
- * Data structures for integrated wired HDCP2 Tx in support of the
- * SKE protocol
- */
-/* HECI struct for creating session key */
-struct wired_cmd_get_session_key_in {
- struct hdcp_cmd_header header;
- struct hdcp_port_id port;
-} __packed;
-
-struct wired_cmd_get_session_key_out {
- struct hdcp_cmd_header header;
- struct hdcp_port_id port;
- u8 e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN];
- u8 r_iv[HDCP_2_2_RIV_LEN];
-} __packed;
-
-/* HECI struct for the Tx enable authentication command */
-struct wired_cmd_enable_auth_in {
- struct hdcp_cmd_header header;
- struct hdcp_port_id port;
- u8 stream_type;
-} __packed;
-
-struct wired_cmd_enable_auth_out {
- struct hdcp_cmd_header header;
- struct hdcp_port_id port;
-} __packed;
-
-/*
- * Data structures for integrated wired HDCP2 Tx in support of
- * the repeater protocols
- */
-/*
- * HECI struct for verifying the downstream repeater's HDCP topology in an
- * integrated wired HDCP Tx session.
- */
-struct wired_cmd_verify_repeater_in {
- struct hdcp_cmd_header header;
- struct hdcp_port_id port;
- u8 rx_info[HDCP_2_2_RXINFO_LEN];
- u8 seq_num_v[HDCP_2_2_SEQ_NUM_LEN];
- u8 v_prime[HDCP_2_2_V_PRIME_HALF_LEN];
- u8 receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN];
-} __packed;
-
-struct wired_cmd_verify_repeater_out {
- struct hdcp_cmd_header header;
- struct hdcp_port_id port;
- u8 content_type_supported;
- u8 v[HDCP_2_2_V_PRIME_HALF_LEN];
-} __packed;
-
-/*
- * HECI struct in support of stream management in an
- * integrated wired HDCP Tx session.
- */
-struct wired_cmd_repeater_auth_stream_req_in {
- struct hdcp_cmd_header header;
- struct hdcp_port_id port;
- u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN];
- u8 m_prime[HDCP_2_2_MPRIME_LEN];
- __be16 k;
- struct hdcp2_streamid_type streams[];
-} __packed;
-
-struct wired_cmd_repeater_auth_stream_req_out {
- struct hdcp_cmd_header header;
- struct hdcp_port_id port;
-} __packed;
#endif /* __MEI_HDCP_H__ */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 5bf0d50d55a0..676d566f38dd 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -342,6 +342,12 @@ static void mei_me_remove(struct pci_dev *pdev)
}
#ifdef CONFIG_PM_SLEEP
+static int mei_me_pci_prepare(struct device *device)
+{
+ pm_runtime_resume(device);
+ return 0;
+}
+
static int mei_me_pci_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
@@ -398,7 +404,17 @@ static int mei_me_pci_resume(struct device *device)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
+
+static void mei_me_pci_complete(struct device *device)
+{
+ pm_runtime_suspend(device);
+}
+#else /* CONFIG_PM_SLEEP */
+
+#define mei_me_pci_prepare NULL
+#define mei_me_pci_complete NULL
+
+#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int mei_me_pm_runtime_idle(struct device *device)
@@ -501,6 +517,8 @@ static inline void mei_me_unset_pm_domain(struct mei_device *dev)
}
static const struct dev_pm_ops mei_me_pm_ops = {
+ .prepare = mei_me_pci_prepare,
+ .complete = mei_me_pci_complete,
SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend,
mei_me_pci_resume)
SET_RUNTIME_PM_OPS(
diff --git a/drivers/mmc/host/dw_mmc-starfive.c b/drivers/mmc/host/dw_mmc-starfive.c
index 40f5969b07a6..dab1508bf83c 100644
--- a/drivers/mmc/host/dw_mmc-starfive.c
+++ b/drivers/mmc/host/dw_mmc-starfive.c
@@ -51,7 +51,7 @@ static int dw_mci_starfive_execute_tuning(struct dw_mci_slot *slot,
struct dw_mci *host = slot->host;
struct starfive_priv *priv = host->priv;
int rise_point = -1, fall_point = -1;
- int err, prev_err;
+ int err, prev_err = 0;
int i;
bool found = 0;
u32 regval;
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index 7ef828942df3..89953093e20c 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -369,7 +369,7 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
MAX_POWER_ON_TIMEOUT, false, host, val,
reg);
if (ret)
- dev_warn(mmc_dev(host->mmc), "Power on failed\n");
+ dev_info(mmc_dev(host->mmc), "Power on failed\n");
}
}
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c
index 5fcefcd0baca..3e0fff3f129e 100644
--- a/drivers/mtd/maps/pismo.c
+++ b/drivers/mtd/maps/pismo.c
@@ -206,8 +206,7 @@ static void pismo_remove(struct i2c_client *client)
kfree(pismo);
}
-static int pismo_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pismo_probe(struct i2c_client *client)
{
struct pismo_pdata *pdata = client->dev.platform_data;
struct pismo_eeprom eeprom;
@@ -260,7 +259,7 @@ static struct i2c_driver pismo_driver = {
.driver = {
.name = "pismo",
},
- .probe = pismo_probe,
+ .probe_new = pismo_probe,
.remove = pismo_remove,
.id_table = pismo_id,
};
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 1de87062c67b..3711d7f74600 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -221,7 +221,10 @@ static blk_status_t ubiblock_read(struct request *req)
rq_for_each_segment(bvec, req, iter)
flush_dcache_page(bvec.bv_page);
- return errno_to_blk_status(ret);
+
+ blk_mq_end_request(req, errno_to_blk_status(ret));
+
+ return BLK_STS_OK;
}
static int ubiblock_open(struct block_device *bdev, fmode_t mode)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 00646aa315c3..236e5219c811 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1775,6 +1775,19 @@ void bond_lower_state_changed(struct slave *slave)
slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg); \
} while (0)
+/* The bonding driver uses ether_setup() to convert a master bond device
+ * to ARPHRD_ETHER, which resets the target netdevice's flags, so we always
+ * have to restore the IFF_MASTER flag and only restore IFF_SLAVE if it was set.
+ */
+static void bond_ether_setup(struct net_device *bond_dev)
+{
+ unsigned int slave_flag = bond_dev->flags & IFF_SLAVE;
+
+ ether_setup(bond_dev);
+ bond_dev->flags |= IFF_MASTER | slave_flag;
+ bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+}
+
/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
struct netlink_ext_ack *extack)
@@ -1866,10 +1879,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
if (slave_dev->type != ARPHRD_ETHER)
bond_setup_by_slave(bond_dev, slave_dev);
- else {
- ether_setup(bond_dev);
- bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
- }
+ else
+ bond_ether_setup(bond_dev);
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
bond_dev);
@@ -2289,9 +2300,7 @@ err_undo_flags:
eth_hw_addr_random(bond_dev);
if (bond_dev->type != ARPHRD_ETHER) {
dev_close(bond_dev);
- ether_setup(bond_dev);
- bond_dev->flags |= IFF_MASTER;
- bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ bond_ether_setup(bond_dev);
}
}
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 8d916e2ee6c2..8dcc32e4e30e 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -93,20 +93,20 @@ static int cc770_get_of_node_data(struct platform_device *pdev,
if (priv->can.clock.freq > 8000000)
priv->cpu_interface |= CPUIF_DMC;
- if (of_get_property(np, "bosch,divide-memory-clock", NULL))
+ if (of_property_read_bool(np, "bosch,divide-memory-clock"))
priv->cpu_interface |= CPUIF_DMC;
- if (of_get_property(np, "bosch,iso-low-speed-mux", NULL))
+ if (of_property_read_bool(np, "bosch,iso-low-speed-mux"))
priv->cpu_interface |= CPUIF_MUX;
if (!of_get_property(np, "bosch,no-comperator-bypass", NULL))
priv->bus_config |= BUSCFG_CBY;
- if (of_get_property(np, "bosch,disconnect-rx0-input", NULL))
+ if (of_property_read_bool(np, "bosch,disconnect-rx0-input"))
priv->bus_config |= BUSCFG_DR0;
- if (of_get_property(np, "bosch,disconnect-rx1-input", NULL))
+ if (of_property_read_bool(np, "bosch,disconnect-rx1-input"))
priv->bus_config |= BUSCFG_DR1;
- if (of_get_property(np, "bosch,disconnect-tx1-output", NULL))
+ if (of_property_read_bool(np, "bosch,disconnect-tx1-output"))
priv->bus_config |= BUSCFG_DT1;
- if (of_get_property(np, "bosch,polarity-dominant", NULL))
+ if (of_property_read_bool(np, "bosch,polarity-dominant"))
priv->bus_config |= BUSCFG_POL;
prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size);
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index e968322dfbf0..70887e0aece3 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -263,7 +263,7 @@ static int b53_mmap_probe_of(struct platform_device *pdev,
if (of_property_read_u32(of_port, "reg", &reg))
continue;
- if (reg < B53_CPU_PORT)
+ if (reg < B53_N_PORTS)
pdata->enabled_ports |= BIT(reg);
}
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 729b36eeb2c4..7fc2155d93d6 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -319,7 +319,7 @@ static const u16 ksz8795_regs[] = {
[S_BROADCAST_CTRL] = 0x06,
[S_MULTICAST_CTRL] = 0x04,
[P_XMII_CTRL_0] = 0x06,
- [P_XMII_CTRL_1] = 0x56,
+ [P_XMII_CTRL_1] = 0x06,
};
static const u32 ksz8795_masks[] = {
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 3a15015bc409..02410ac439b7 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -393,12 +393,38 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]);
}
-/* Setup TX circuit including relevant PAD and driving */
+/* Set up switch core clock for MT7530 */
+static void mt7530_pll_setup(struct mt7530_priv *priv)
+{
+ /* Disable core clock */
+ core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
+
+ /* Disable PLL */
+ core_write(priv, CORE_GSWPLL_GRP1, 0);
+
+ /* Set core clock into 500Mhz */
+ core_write(priv, CORE_GSWPLL_GRP2,
+ RG_GSWPLL_POSDIV_500M(1) |
+ RG_GSWPLL_FBKDIV_500M(25));
+
+ /* Enable PLL */
+ core_write(priv, CORE_GSWPLL_GRP1,
+ RG_GSWPLL_EN_PRE |
+ RG_GSWPLL_POSDIV_200M(2) |
+ RG_GSWPLL_FBKDIV_200M(32));
+
+ udelay(20);
+
+ /* Enable core clock */
+ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
+}
+
+/* Setup port 6 interface mode and TRGMII TX circuit */
static int
mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
- u32 ncpo1, ssc_delta, trgint, i, xtal;
+ u32 ncpo1, ssc_delta, trgint, xtal;
xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;
@@ -412,11 +438,13 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
switch (interface) {
case PHY_INTERFACE_MODE_RGMII:
trgint = 0;
- /* PLL frequency: 125MHz */
- ncpo1 = 0x0c80;
break;
case PHY_INTERFACE_MODE_TRGMII:
trgint = 1;
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ssc_delta = 0x57;
+ else
+ ssc_delta = 0x87;
if (priv->id == ID_MT7621) {
/* PLL frequency: 150MHz: 1.2GBit */
if (xtal == HWTRAP_XTAL_40MHZ)
@@ -436,61 +464,32 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
return -EINVAL;
}
- if (xtal == HWTRAP_XTAL_25MHZ)
- ssc_delta = 0x57;
- else
- ssc_delta = 0x87;
-
mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
P6_INTF_MODE(trgint));
- /* Lower Tx Driving for TRGMII path */
- for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
- mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
- TD_DM_DRVP(8) | TD_DM_DRVN(8));
-
- /* Disable MT7530 core and TRGMII Tx clocks */
- core_clear(priv, CORE_TRGMII_GSW_CLK_CG,
- REG_GSWCK_EN | REG_TRGMIICK_EN);
-
- /* Setup core clock for MT7530 */
- /* Disable PLL */
- core_write(priv, CORE_GSWPLL_GRP1, 0);
-
- /* Set core clock into 500Mhz */
- core_write(priv, CORE_GSWPLL_GRP2,
- RG_GSWPLL_POSDIV_500M(1) |
- RG_GSWPLL_FBKDIV_500M(25));
-
- /* Enable PLL */
- core_write(priv, CORE_GSWPLL_GRP1,
- RG_GSWPLL_EN_PRE |
- RG_GSWPLL_POSDIV_200M(2) |
- RG_GSWPLL_FBKDIV_200M(32));
+ if (trgint) {
+ /* Disable the MT7530 TRGMII clocks */
+ core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+
+ /* Setup the MT7530 TRGMII Tx Clock */
+ core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
+ core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
+ core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
+ core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
+ core_write(priv, CORE_PLL_GROUP4,
+ RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
+ RG_SYSPLL_BIAS_LPF_EN);
+ core_write(priv, CORE_PLL_GROUP2,
+ RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
+ RG_SYSPLL_POSDIV(1));
+ core_write(priv, CORE_PLL_GROUP7,
+ RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
+ RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
+
+ /* Enable the MT7530 TRGMII clocks */
+ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+ }
- /* Setup the MT7530 TRGMII Tx Clock */
- core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
- core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
- core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
- core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
- core_write(priv, CORE_PLL_GROUP4,
- RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
- RG_SYSPLL_BIAS_LPF_EN);
- core_write(priv, CORE_PLL_GROUP2,
- RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
- RG_SYSPLL_POSDIV(1));
- core_write(priv, CORE_PLL_GROUP7,
- RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
- RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
-
- /* Enable MT7530 core and TRGMII Tx clocks */
- core_set(priv, CORE_TRGMII_GSW_CLK_CG,
- REG_GSWCK_EN | REG_TRGMIICK_EN);
-
- if (!trgint)
- for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
- mt7530_rmw(priv, MT7530_TRGMII_RD(i),
- RD_TAP_MASK, RD_TAP(16));
return 0;
}
@@ -2196,7 +2195,18 @@ mt7530_setup(struct dsa_switch *ds)
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
SYS_CTRL_REG_RST);
- /* Enable Port 6 only; P5 as GMAC5 which currently is not supported */
+ mt7530_pll_setup(priv);
+
+ /* Lower Tx driving for TRGMII path */
+ for (i = 0; i < NUM_TRGMII_CTRL; i++)
+ mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
+ TD_DM_DRVP(8) | TD_DM_DRVN(8));
+
+ for (i = 0; i < NUM_TRGMII_CTRL; i++)
+ mt7530_rmw(priv, MT7530_TRGMII_RD(i),
+ RD_TAP_MASK, RD_TAP(16));
+
+ /* Enable port 6 */
val = mt7530_read(priv, MT7530_MHWTRAP);
val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
val |= MHWTRAP_MANUAL;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 0a5d6c7bb128..30383c4f8fd0 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3549,7 +3549,7 @@ static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port)
return 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
else if (chip->info->ops->set_max_frame_size)
return 1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
- return 1522 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
+ return ETH_DATA_LEN;
}
static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
@@ -3557,6 +3557,17 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
struct mv88e6xxx_chip *chip = ds->priv;
int ret = 0;
+ /* For families where we don't know how to alter the MTU,
+ * just accept any value up to ETH_DATA_LEN
+ */
+ if (!chip->info->ops->port_set_jumbo_size &&
+ !chip->info->ops->set_max_frame_size) {
+ if (new_mtu > ETH_DATA_LEN)
+ return -EINVAL;
+
+ return 0;
+ }
+
if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
new_mtu += EDSA_HLEN;
@@ -3565,9 +3576,6 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);
else if (chip->info->ops->set_max_frame_size)
ret = chip->info->ops->set_max_frame_size(chip, new_mtu);
- else
- if (new_mtu > 1522)
- ret = -EINVAL;
mv88e6xxx_reg_unlock(chip);
return ret;
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 323ec56e8a74..1917da784191 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -132,6 +132,16 @@ source "drivers/net/ethernet/mscc/Kconfig"
source "drivers/net/ethernet/microsoft/Kconfig"
source "drivers/net/ethernet/moxa/Kconfig"
source "drivers/net/ethernet/myricom/Kconfig"
+
+config FEALNX
+ tristate "Myson MTD-8xx PCI Ethernet support"
+ depends on PCI
+ select CRC32
+ select MII
+ help
+ Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
+ cards. <http://www.myson.com.tw/>
+
source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/natsemi/Kconfig"
source "drivers/net/ethernet/neterion/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 2fedbaa545eb..0d872d4efcd1 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -64,6 +64,7 @@ obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
obj-$(CONFIG_NET_VENDOR_MICROSEMI) += mscc/
obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
+obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
obj-$(CONFIG_NET_VENDOR_NETERION) += neterion/
obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 8da79eedc057..1d4f2f4d10f2 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -850,11 +850,20 @@ static int ena_set_channels(struct net_device *netdev,
struct ena_adapter *adapter = netdev_priv(netdev);
u32 count = channels->combined_count;
/* The check for max value is already done in ethtool */
- if (count < ENA_MIN_NUM_IO_QUEUES ||
- (ena_xdp_present(adapter) &&
- !ena_xdp_legal_queue_count(adapter, count)))
+ if (count < ENA_MIN_NUM_IO_QUEUES)
return -EINVAL;
+ if (!ena_xdp_legal_queue_count(adapter, count)) {
+ if (ena_xdp_present(adapter))
+ return -EINVAL;
+
+ xdp_clear_features_flag(netdev);
+ } else {
+ xdp_set_features_flag(netdev,
+ NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT);
+ }
+
return ena_update_queue_count(adapter, count);
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index d3999db7c6a2..cbfe7f977270 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -4105,8 +4105,6 @@ static void ena_set_conf_feat_params(struct ena_adapter *adapter,
/* Set offload features */
ena_set_dev_offloads(feat, netdev);
- netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
-
adapter->max_mtu = feat->dev_attr.max_mtu;
netdev->max_mtu = adapter->max_mtu;
netdev->min_mtu = ENA_MIN_MTU;
@@ -4393,6 +4391,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_config_debug_area(adapter);
+ if (ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT;
+
memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
netif_carrier_off(netdev);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 1e8d902e1c8e..7f933175cbda 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -412,6 +412,25 @@ int aq_xdp_xmit(struct net_device *dev, int num_frames,
return num_frames - drop;
}
+static struct sk_buff *aq_xdp_build_skb(struct xdp_buff *xdp,
+ struct net_device *dev,
+ struct aq_ring_buff_s *buff)
+{
+ struct xdp_frame *xdpf;
+ struct sk_buff *skb;
+
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ return NULL;
+
+ skb = xdp_build_skb_from_frame(xdpf, dev);
+ if (!skb)
+ return NULL;
+
+ aq_get_rxpages_xdp(buff, xdp);
+ return skb;
+}
+
static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
struct xdp_buff *xdp,
struct aq_ring_s *rx_ring,
@@ -431,7 +450,7 @@ static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
prog = READ_ONCE(rx_ring->xdp_prog);
if (!prog)
- goto pass;
+ return aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
prefetchw(xdp->data_hard_start); /* xdp_frame write */
@@ -442,17 +461,12 @@ static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_PASS:
-pass:
- xdpf = xdp_convert_buff_to_frame(xdp);
- if (unlikely(!xdpf))
- goto out_aborted;
- skb = xdp_build_skb_from_frame(xdpf, aq_nic->ndev);
+ skb = aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
if (!skb)
goto out_aborted;
u64_stats_update_begin(&rx_ring->stats.rx.syncp);
++rx_ring->stats.rx.xdp_pass;
u64_stats_update_end(&rx_ring->stats.rx.syncp);
- aq_get_rxpages_xdp(buff, xdp);
return skb;
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(xdp);
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 3038386a5afd..1761df8fb7f9 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -890,13 +890,13 @@ static void bgmac_chip_reset_idm_config(struct bgmac *bgmac)
if (iost & BGMAC_BCMA_IOST_ATTACHED) {
flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
- if (!bgmac->has_robosw)
+ if (bgmac->in_init || !bgmac->has_robosw)
flags |= BGMAC_BCMA_IOCTL_SW_RESET;
}
bgmac_clk_enable(bgmac, flags);
}
- if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
+ if (iost & BGMAC_BCMA_IOST_ATTACHED && (bgmac->in_init || !bgmac->has_robosw))
bgmac_idm_write(bgmac, BCMA_IOCTL,
bgmac_idm_read(bgmac, BCMA_IOCTL) &
~BGMAC_BCMA_IOCTL_SW_RESET);
@@ -1490,6 +1490,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
struct net_device *net_dev = bgmac->net_dev;
int err;
+ bgmac->in_init = true;
+
bgmac_chip_intrs_off(bgmac);
net_dev->irq = bgmac->irq;
@@ -1542,6 +1544,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
/* Omit FCS from max MTU size */
net_dev->max_mtu = BGMAC_RX_MAX_FRAME_SIZE - ETH_FCS_LEN;
+ bgmac->in_init = false;
+
err = register_netdev(bgmac->net_dev);
if (err) {
dev_err(bgmac->dev, "Cannot register net device\n");
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index e05ac92c0650..d73ef262991d 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -472,6 +472,8 @@ struct bgmac {
int irq;
u32 int_mask;
+ bool in_init;
+
/* Current MAC state */
int mac_speed;
int mac_duplex;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 5d4b1f2ebeac..e2e2c986c82b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3145,7 +3145,7 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
static void bnxt_free_tpa_info(struct bnxt *bp)
{
- int i;
+ int i, j;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
@@ -3153,8 +3153,10 @@ static void bnxt_free_tpa_info(struct bnxt *bp)
kfree(rxr->rx_tpa_idx_map);
rxr->rx_tpa_idx_map = NULL;
if (rxr->rx_tpa) {
- kfree(rxr->rx_tpa[0].agg_arr);
- rxr->rx_tpa[0].agg_arr = NULL;
+ for (j = 0; j < bp->max_tpa; j++) {
+ kfree(rxr->rx_tpa[j].agg_arr);
+ rxr->rx_tpa[j].agg_arr = NULL;
+ }
}
kfree(rxr->rx_tpa);
rxr->rx_tpa = NULL;
@@ -3163,14 +3165,13 @@ static void bnxt_free_tpa_info(struct bnxt *bp)
static int bnxt_alloc_tpa_info(struct bnxt *bp)
{
- int i, j, total_aggs = 0;
+ int i, j;
bp->max_tpa = MAX_TPA;
if (bp->flags & BNXT_FLAG_CHIP_P5) {
if (!bp->max_tpa_v2)
return 0;
bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
- total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
}
for (i = 0; i < bp->rx_nr_rings; i++) {
@@ -3184,12 +3185,12 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
if (!(bp->flags & BNXT_FLAG_CHIP_P5))
continue;
- agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
- rxr->rx_tpa[0].agg_arr = agg;
- if (!agg)
- return -ENOMEM;
- for (j = 1; j < bp->max_tpa; j++)
- rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
+ for (j = 0; j < bp->max_tpa; j++) {
+ agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
+ if (!agg)
+ return -ENOMEM;
+ rxr->rx_tpa[j].agg_arr = agg;
+ }
rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
GFP_KERNEL);
if (!rxr->rx_tpa_idx_map)
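
The allocation change above replaces one large agg array carved up across TPA entries (and freed only through entry 0) with an independent allocation per entry, freed per entry as well. A user-space sketch of the pattern, with calloc/free standing in for kcalloc/kfree and invented names:

#include <stdio.h>
#include <stdlib.h>

#define MAX_FRAGS 4

struct tpa_entry {
	int *agg_arr;
};

static int alloc_per_entry(struct tpa_entry *tpa, int n)
{
	for (int i = 0; i < n; i++) {
		tpa[i].agg_arr = calloc(MAX_FRAGS, sizeof(int));
		if (!tpa[i].agg_arr)
			return -1;
	}
	return 0;
}

static void free_per_entry(struct tpa_entry *tpa, int n)
{
	for (int i = 0; i < n; i++) {
		free(tpa[i].agg_arr);	/* every entry owns its own buffer */
		tpa[i].agg_arr = NULL;
	}
}

int main(void)
{
	struct tpa_entry tpa[8] = { 0 };

	if (alloc_per_entry(tpa, 8) == 0)
		printf("allocated %d independent agg arrays\n", 8);
	free_per_entry(tpa, 8);
	return 0;
}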
@@ -6989,11 +6990,9 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
}
- if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) {
+ if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
bp->flags |= BNXT_FLAG_MULTI_HOST;
- if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC)
- bp->fw_cap &= ~BNXT_FW_CAP_PTP_RTC;
- }
+
if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
@@ -13204,8 +13203,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_free_hwrm_resources(bp);
bnxt_ethtool_free(bp);
bnxt_dcb_free(bp);
- kfree(bp->edev);
- bp->edev = NULL;
kfree(bp->ptp_cfg);
bp->ptp_cfg = NULL;
kfree(bp->fw_health);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index dcb09fbe4007..c0628ac1b798 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2000,6 +2000,8 @@ struct bnxt {
u32 fw_dbg_cap;
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
+#define BNXT_PTP_USE_RTC(bp) (!BNXT_MH(bp) && \
+ ((bp)->fw_cap & BNXT_FW_CAP_PTP_RTC))
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u16 hwrm_cmd_kong_seq;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 4ec8bba18cdd..a3a3978a4d1c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -63,7 +63,7 @@ static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info,
ptp_info);
u64 ns = timespec64_to_ns(ts);
- if (ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)
+ if (BNXT_PTP_USE_RTC(ptp->bp))
return bnxt_ptp_cfg_settime(ptp->bp, ns);
spin_lock_bh(&ptp->ptp_lock);
@@ -196,7 +196,7 @@ static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
- if (ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)
+ if (BNXT_PTP_USE_RTC(ptp->bp))
return bnxt_ptp_adjphc(ptp, delta);
spin_lock_bh(&ptp->ptp_lock);
@@ -205,34 +205,39 @@ static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
return 0;
}
+static int bnxt_ptp_adjfine_rtc(struct bnxt *bp, long scaled_ppm)
+{
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
+ struct hwrm_port_mac_cfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
+ if (rc)
+ return rc;
+
+ req->ptp_freq_adj_ppb = cpu_to_le32(ppb);
+ req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ netdev_err(bp->dev,
+ "ptp adjfine failed. rc = %d\n", rc);
+ return rc;
+}
+
static int bnxt_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
{
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
- struct hwrm_port_mac_cfg_input *req;
struct bnxt *bp = ptp->bp;
- int rc = 0;
- if (!(ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)) {
- spin_lock_bh(&ptp->ptp_lock);
- timecounter_read(&ptp->tc);
- ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm);
- spin_unlock_bh(&ptp->ptp_lock);
- } else {
- s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
-
- rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
- if (rc)
- return rc;
+ if (BNXT_PTP_USE_RTC(bp))
+ return bnxt_ptp_adjfine_rtc(bp, scaled_ppm);
- req->ptp_freq_adj_ppb = cpu_to_le32(ppb);
- req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
- rc = hwrm_req_send(ptp->bp, req);
- if (rc)
- netdev_err(ptp->bp->dev,
- "ptp adjfine failed. rc = %d\n", rc);
- }
- return rc;
+ spin_lock_bh(&ptp->ptp_lock);
+ timecounter_read(&ptp->tc);
+ ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm);
+ spin_unlock_bh(&ptp->ptp_lock);
+ return 0;
}
void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2)
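
bnxt_ptp_adjfine_rtc() converts the scaled_ppm argument with scaled_ppm_to_ppb() before sending it to firmware; scaled_ppm is parts-per-million with a 16-bit binary fraction, so the conversion is essentially ppb = scaled_ppm * 1000 / 2^16. A standalone sketch of that arithmetic (illustrative, not the kernel helper itself):

#include <stdio.h>
#include <stdint.h>

static int64_t sketch_scaled_ppm_to_ppb(int64_t scaled_ppm)
{
	int neg = scaled_ppm < 0;
	int64_t ppb;

	if (neg)
		scaled_ppm = -scaled_ppm;
	ppb = (scaled_ppm * 125) >> 13;	/* == scaled_ppm * 1000 / 65536 */
	return neg ? -ppb : ppb;
}

int main(void)
{
	/* +1 ppm is 1 << 16 in scaled_ppm and should come out near 1000 ppb */
	printf("%lld\n", (long long)sketch_scaled_ppm_to_ppb(1 << 16));
	printf("%lld\n", (long long)sketch_scaled_ppm_to_ppb(-(1 << 16)));
	return 0;
}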
@@ -879,7 +884,7 @@ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
u64 ns;
int rc;
- if (!bp->ptp_cfg || !(bp->fw_cap & BNXT_FW_CAP_PTP_RTC))
+ if (!bp->ptp_cfg || !BNXT_PTP_USE_RTC(bp))
return -ENODEV;
if (!phc_cfg) {
@@ -932,13 +937,14 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
spin_lock_init(&ptp->ptp_lock);
- if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
+ if (BNXT_PTP_USE_RTC(bp)) {
bnxt_ptp_timecounter_init(bp, false);
rc = bnxt_ptp_init_rtc(bp, phc_cfg);
if (rc)
goto out;
} else {
bnxt_ptp_timecounter_init(bp, true);
+ bnxt_ptp_adjfine_rtc(bp, 0);
}
ptp->ptp_info = bnxt_ptp_caps;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index d4cc9c371e7b..e7b5e28ee29f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -317,9 +317,11 @@ static void bnxt_aux_dev_release(struct device *dev)
{
struct bnxt_aux_priv *aux_priv =
container_of(dev, struct bnxt_aux_priv, aux_dev.dev);
+ struct bnxt *bp = netdev_priv(aux_priv->edev->net);
ida_free(&bnxt_aux_dev_ids, aux_priv->id);
kfree(aux_priv->edev->ulp_tbl);
+ bp->edev = NULL;
kfree(aux_priv->edev);
kfree(aux_priv);
}
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 6e141a8bbf43..66e30561569e 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -4990,7 +4990,7 @@ static int macb_probe(struct platform_device *pdev)
bp->jumbo_max_len = macb_config->jumbo_max_len;
bp->wol = 0;
- if (of_get_property(np, "magic-packet", NULL))
+ if (of_property_read_bool(np, "magic-packet"))
bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index e5c71f907852..d8d71bf97983 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -735,12 +735,17 @@ static int nicvf_set_channels(struct net_device *dev,
if (channel->tx_count > nic->max_queues)
return -EINVAL;
- if (nic->xdp_prog &&
- ((channel->tx_count + channel->rx_count) > nic->max_queues)) {
- netdev_err(nic->netdev,
- "XDP mode, RXQs + TXQs > Max %d\n",
- nic->max_queues);
- return -EINVAL;
+ if (channel->tx_count + channel->rx_count > nic->max_queues) {
+ if (nic->xdp_prog) {
+ netdev_err(nic->netdev,
+ "XDP mode, RXQs + TXQs > Max %d\n",
+ nic->max_queues);
+ return -EINVAL;
+ }
+
+ xdp_clear_features_flag(nic->netdev);
+ } else if (!pass1_silicon(nic->pdev)) {
+ xdp_set_features_flag(dev, NETDEV_XDP_ACT_BASIC);
}
if (if_up)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 8b25313c7f6b..eff350e0bc2a 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -2218,7 +2218,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &nicvf_netdev_ops;
netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
- netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
+ if (!pass1_silicon(nic->pdev) &&
+ nic->rx_queues + nic->tx_queues <= nic->max_queues)
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
/* MTU range: 64 - 9200 */
netdev->min_mtu = NIC_HW_MIN_FRS;
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index b21e56de6167..05a89ab6766c 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1393,9 +1393,9 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
if (!pdata)
return ERR_PTR(-ENOMEM);
- if (of_find_property(np, "davicom,ext-phy", NULL))
+ if (of_property_read_bool(np, "davicom,ext-phy"))
pdata->flags |= DM9000_PLATF_EXT_PHY;
- if (of_find_property(np, "davicom,no-eeprom", NULL))
+ if (of_property_read_bool(np, "davicom,no-eeprom"))
pdata->flags |= DM9000_PLATF_NO_EEPROM;
ret = of_get_mac_address(np, pdata->dev_addr);
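
Both this hunk and the macb one above swap a property-presence lookup for of_property_read_bool(), which reads more clearly for flag properties because a devicetree boolean is encoded purely by the property's presence. A tiny sketch of that presence-only semantics (the fake node type and helper are invented, not the OF API):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct fake_node {
	const char *props[4];	/* names of properties present on the node */
};

static bool prop_read_bool(const struct fake_node *np, const char *name)
{
	for (int i = 0; i < 4 && np->props[i]; i++)
		if (strcmp(np->props[i], name) == 0)
			return true;	/* present => true; no value is stored */
	return false;
}

int main(void)
{
	struct fake_node eth = { { "davicom,ext-phy", "magic-packet", NULL } };

	printf("ext-phy: %d\n", prop_read_bool(&eth, "davicom,ext-phy"));	/* 1 */
	printf("no-eeprom: %d\n", prop_read_bool(&eth, "davicom,no-eeprom"));	/* 0 */
	return 0;
}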
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
new file mode 100644
index 000000000000..ed18450fd2cc
--- /dev/null
+++ b/drivers/net/ethernet/fealnx.c
@@ -0,0 +1,1953 @@
+/*
+ Written 1998-2000 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/pci-skeleton.html
+
+ Linux kernel updates:
+
+ Version 2.51, Nov 17, 2001 (jgarzik):
+ - Add ethtool support
+ - Replace some MII-related magic numbers with constants
+
+*/
+
+#define DRV_NAME "fealnx"
+
+static int debug; /* 1-> print debug message */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
+static int multicast_filter_limit = 32;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme. */
+/* Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak;
+
+/* Used to pass the media type, etc. */
+/* Both 'options[]' and 'full_duplex[]' should exist for driver */
+/* interoperability. */
+/* The media type is usually passed in 'options[]'. */
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
+static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
+
+/* Operational parameters that are set at compile time. */
+/* Keep the ring sizes a power of two for compile efficiency. */
+/* The compiler will convert <unsigned>'%'<2^N> into a bit mask. */
+/* Making the Tx ring too large decreases the effectiveness of channel */
+/* bonding and packet priority. */
+/* There are no ill effects from too-large receive rings. */
+// 88-12-9 modify,
+// #define TX_RING_SIZE 16
+// #define RX_RING_SIZE 32
+#define TX_RING_SIZE 6
+#define RX_RING_SIZE 12
+#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct fealnx_desc)
+#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct fealnx_desc)
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (2*HZ)
+
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
+
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <asm/byteorder.h>
+
+/* This driver was written to use PCI memory space, however some x86 systems
+ work only with I/O space accesses. */
+#ifndef __alpha__
+#define USE_IO_OPS
+#endif
+
+/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */
+/* This is only in the support-all-kernels source code. */
+
+#define RUN_AT(x) (jiffies + (x))
+
+MODULE_AUTHOR("Myson or whoever");
+MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
+MODULE_LICENSE("GPL");
+module_param(max_interrupt_work, int, 0);
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param(multicast_filter_limit, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
+MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
+MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
+MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");
+
+enum {
+ MIN_REGION_SIZE = 136,
+};
+
+/* A chip capabilities table, matching the entries in pci_tbl[] above. */
+enum chip_capability_flags {
+ HAS_MII_XCVR,
+ HAS_CHIP_XCVR,
+};
+
+/* 89/6/13 add, */
+/* for different PHY */
+enum phy_type_flags {
+ MysonPHY = 1,
+ AhdocPHY = 2,
+ SeeqPHY = 3,
+ MarvellPHY = 4,
+ Myson981 = 5,
+ LevelOnePHY = 6,
+ OtherPHY = 10,
+};
+
+struct chip_info {
+ char *chip_name;
+ int flags;
+};
+
+static const struct chip_info skel_netdrv_tbl[] = {
+ { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
+ { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
+ { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
+};
+
+/* Offsets to the Command and Status Registers. */
+enum fealnx_offsets {
+ PAR0 = 0x0, /* physical address 0-3 */
+ PAR1 = 0x04, /* physical address 4-5 */
+ MAR0 = 0x08, /* multicast address 0-3 */
+ MAR1 = 0x0C, /* multicast address 4-7 */
+ FAR0 = 0x10, /* flow-control address 0-3 */
+ FAR1 = 0x14, /* flow-control address 4-5 */
+ TCRRCR = 0x18, /* receive & transmit configuration */
+ BCR = 0x1C, /* bus command */
+ TXPDR = 0x20, /* transmit polling demand */
+ RXPDR = 0x24, /* receive polling demand */
+ RXCWP = 0x28, /* receive current word pointer */
+ TXLBA = 0x2C, /* transmit list base address */
+ RXLBA = 0x30, /* receive list base address */
+ ISR = 0x34, /* interrupt status */
+ IMR = 0x38, /* interrupt mask */
+ FTH = 0x3C, /* flow control high/low threshold */
+ MANAGEMENT = 0x40, /* bootrom/eeprom and mii management */
+ TALLY = 0x44, /* tally counters for crc and mpa */
+ TSR = 0x48, /* tally counter for transmit status */
+ BMCRSR = 0x4c, /* basic mode control and status */
+ PHYIDENTIFIER = 0x50, /* phy identifier */
+ ANARANLPAR = 0x54, /* auto-negotiation advertisement and link
+ partner ability */
+ ANEROCR = 0x58, /* auto-negotiation expansion and pci conf. */
+ BPREMRPSR = 0x5c, /* bypass & receive error mask and phy status */
+};
+
+/* Bits in the interrupt status/enable registers. */
+/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
+enum intr_status_bits {
+ RFCON = 0x00020000, /* receive flow control xon packet */
+ RFCOFF = 0x00010000, /* receive flow control xoff packet */
+ LSCStatus = 0x00008000, /* link status change */
+ ANCStatus = 0x00004000, /* autonegotiation completed */
+ FBE = 0x00002000, /* fatal bus error */
+ FBEMask = 0x00001800, /* mask bit12-11 */
+ ParityErr = 0x00000000, /* parity error */
+ TargetErr = 0x00001000, /* target abort */
+ MasterErr = 0x00000800, /* master error */
+ TUNF = 0x00000400, /* transmit underflow */
+ ROVF = 0x00000200, /* receive overflow */
+ ETI = 0x00000100, /* transmit early int */
+ ERI = 0x00000080, /* receive early int */
+ CNTOVF = 0x00000040, /* counter overflow */
+ RBU = 0x00000020, /* receive buffer unavailable */
+ TBU = 0x00000010, /* transmit buffer unavailable */
+ TI = 0x00000008, /* transmit interrupt */
+ RI = 0x00000004, /* receive interrupt */
+ RxErr = 0x00000002, /* receive error */
+};
+
+/* Bits in the NetworkConfig register, W for writing, R for reading */
+/* FIXME: some names are invented by me. Marked with (name?) */
+/* If you have docs and know bit names, please fix 'em */
+enum rx_mode_bits {
+ CR_W_ENH = 0x02000000, /* enhanced mode (name?) */
+ CR_W_FD = 0x00100000, /* full duplex */
+ CR_W_PS10 = 0x00080000, /* 10 mbit */
+ CR_W_TXEN = 0x00040000, /* tx enable (name?) */
+ CR_W_PS1000 = 0x00010000, /* 1000 mbit */
+ /* CR_W_RXBURSTMASK= 0x00000e00, I'm unsure about this */
+ CR_W_RXMODEMASK = 0x000000e0,
+ CR_W_PROM = 0x00000080, /* promiscuous mode */
+ CR_W_AB = 0x00000040, /* accept broadcast */
+ CR_W_AM = 0x00000020, /* accept multicast */
+ CR_W_ARP = 0x00000008, /* receive runt pkt */
+ CR_W_ALP = 0x00000004, /* receive long pkt */
+ CR_W_SEP = 0x00000002, /* receive error pkt */
+ CR_W_RXEN = 0x00000001, /* rx enable (unicast?) (name?) */
+
+ CR_R_TXSTOP = 0x04000000, /* tx stopped (name?) */
+ CR_R_FD = 0x00100000, /* full duplex detected */
+ CR_R_PS10 = 0x00080000, /* 10 mbit detected */
+ CR_R_RXSTOP = 0x00008000, /* rx stopped (name?) */
+};
+
+/* The Tulip Rx and Tx buffer descriptors. */
+struct fealnx_desc {
+ s32 status;
+ s32 control;
+ u32 buffer;
+ u32 next_desc;
+ struct fealnx_desc *next_desc_logical;
+ struct sk_buff *skbuff;
+ u32 reserved1;
+ u32 reserved2;
+};
+
+/* Bits in network_desc.status */
+enum rx_desc_status_bits {
+ RXOWN = 0x80000000, /* own bit */
+ FLNGMASK = 0x0fff0000, /* frame length */
+ FLNGShift = 16,
+ MARSTATUS = 0x00004000, /* multicast address received */
+ BARSTATUS = 0x00002000, /* broadcast address received */
+ PHYSTATUS = 0x00001000, /* physical address received */
+ RXFSD = 0x00000800, /* first descriptor */
+ RXLSD = 0x00000400, /* last descriptor */
+ ErrorSummary = 0x80, /* error summary */
+ RUNTPKT = 0x40, /* runt packet received */
+ LONGPKT = 0x20, /* long packet received */
+ FAE = 0x10, /* frame align error */
+ CRC = 0x08, /* crc error */
+ RXER = 0x04, /* receive error */
+};
+
+enum rx_desc_control_bits {
+ RXIC = 0x00800000, /* interrupt control */
+ RBSShift = 0,
+};
+
+enum tx_desc_status_bits {
+ TXOWN = 0x80000000, /* own bit */
+ JABTO = 0x00004000, /* jabber timeout */
+ CSL = 0x00002000, /* carrier sense lost */
+ LC = 0x00001000, /* late collision */
+ EC = 0x00000800, /* excessive collision */
+ UDF = 0x00000400, /* fifo underflow */
+ DFR = 0x00000200, /* deferred */
+ HF = 0x00000100, /* heartbeat fail */
+ NCRMask = 0x000000ff, /* collision retry count */
+ NCRShift = 0,
+};
+
+enum tx_desc_control_bits {
+ TXIC = 0x80000000, /* interrupt control */
+ ETIControl = 0x40000000, /* early transmit interrupt */
+ TXLD = 0x20000000, /* last descriptor */
+ TXFD = 0x10000000, /* first descriptor */
+ CRCEnable = 0x08000000, /* crc control */
+ PADEnable = 0x04000000, /* padding control */
+ RetryTxLC = 0x02000000, /* retry late collision */
+ PKTSMask = 0x3ff800, /* packet size bit21-11 */
+ PKTSShift = 11,
+ TBSMask = 0x000007ff, /* transmit buffer bit 10-0 */
+ TBSShift = 0,
+};
+
+/* BootROM/EEPROM/MII Management Register */
+#define MASK_MIIR_MII_READ 0x00000000
+#define MASK_MIIR_MII_WRITE 0x00000008
+#define MASK_MIIR_MII_MDO 0x00000004
+#define MASK_MIIR_MII_MDI 0x00000002
+#define MASK_MIIR_MII_MDC 0x00000001
+
+/* ST+OP+PHYAD+REGAD+TA */
+#define OP_READ 0x6000 /* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */
+#define OP_WRITE 0x5002 /* ST:01+OP:01+PHYAD+REGAD+TA:10 */
+
+/* ------------------------------------------------------------------------- */
+/* Constants for Myson PHY */
+/* ------------------------------------------------------------------------- */
+#define MysonPHYID 0xd0000302
+/* 89-7-27 add, (begin) */
+#define MysonPHYID0 0x0302
+#define StatusRegister 18
+#define SPEED100 0x0400 // bit10
+#define FULLMODE 0x0800 // bit11
+/* 89-7-27 add, (end) */
+
+/* ------------------------------------------------------------------------- */
+/* Constants for Seeq 80225 PHY */
+/* ------------------------------------------------------------------------- */
+#define SeeqPHYID0 0x0016
+
+#define MIIRegister18 18
+#define SPD_DET_100 0x80
+#define DPLX_DET_FULL 0x40
+
+/* ------------------------------------------------------------------------- */
+/* Constants for Ahdoc 101 PHY */
+/* ------------------------------------------------------------------------- */
+#define AhdocPHYID0 0x0022
+
+#define DiagnosticReg 18
+#define DPLX_FULL 0x0800
+#define Speed_100 0x0400
+
+/* 89/6/13 add, */
+/* -------------------------------------------------------------------------- */
+/* Constants */
+/* -------------------------------------------------------------------------- */
+#define MarvellPHYID0 0x0141
+#define LevelOnePHYID0 0x0013
+
+#define MII1000BaseTControlReg 9
+#define MII1000BaseTStatusReg 10
+#define SpecificReg 17
+
+/* for 1000BaseT Control Register */
+#define PHYAbletoPerform1000FullDuplex 0x0200
+#define PHYAbletoPerform1000HalfDuplex 0x0100
+#define PHY1000AbilityMask 0x300
+
+// for phy specific status register, marvell phy.
+#define SpeedMask 0x0c000
+#define Speed_1000M 0x08000
+#define Speed_100M 0x4000
+#define Speed_10M 0
+#define Full_Duplex 0x2000
+
+// 89/12/29 add, for phy specific status register, levelone phy, (begin)
+#define LXT1000_100M 0x08000
+#define LXT1000_1000M 0x0c000
+#define LXT1000_Full 0x200
+// 89/12/29 add, for phy specific status register, levelone phy, (end)
+
+/* for 3-in-1 case, BMCRSR register */
+#define LinkIsUp2 0x00040000
+
+/* for PHY */
+#define LinkIsUp 0x0004
+
+
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct fealnx_desc *rx_ring;
+ struct fealnx_desc *tx_ring;
+
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+
+ spinlock_t lock;
+
+ /* Media monitoring timer. */
+ struct timer_list timer;
+
+ /* Reset timer */
+ struct timer_list reset_timer;
+ int reset_timer_armed;
+ unsigned long crvalue_sv;
+ unsigned long imrvalue_sv;
+
+ /* Frequently used values: keep some adjacent for cache effect. */
+ int flags;
+ struct pci_dev *pci_dev;
+ unsigned long crvalue;
+ unsigned long bcrvalue;
+ unsigned long imrvalue;
+ struct fealnx_desc *cur_rx;
+ struct fealnx_desc *lack_rxbuf;
+ int really_rx_count;
+ struct fealnx_desc *cur_tx;
+ struct fealnx_desc *cur_tx_copy;
+ int really_tx_count;
+ int free_tx_count;
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+
+ /* These values keep track of the transceiver/media in use. */
+ unsigned int linkok;
+ unsigned int line_speed;
+ unsigned int duplexmode;
+ unsigned int default_port:4; /* Last dev->if_port value. */
+ unsigned int PHYType;
+
+ /* MII transceiver section. */
+ int mii_cnt; /* MII device addresses. */
+ unsigned char phys[2]; /* MII device addresses. */
+ struct mii_if_info mii;
+ void __iomem *mem;
+};
+
+
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int netdev_open(struct net_device *dev);
+static void getlinktype(struct net_device *dev);
+static void getlinkstatus(struct net_device *dev);
+static void netdev_timer(struct timer_list *t);
+static void reset_timer(struct timer_list *t);
+static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue);
+static void init_ring(struct net_device *dev);
+static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t intr_handler(int irq, void *dev_instance);
+static int netdev_rx(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static void __set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static const struct ethtool_ops netdev_ethtool_ops;
+static int netdev_close(struct net_device *dev);
+static void reset_rx_descriptors(struct net_device *dev);
+static void reset_tx_descriptors(struct net_device *dev);
+
+static void stop_nic_rx(void __iomem *ioaddr, long crvalue)
+{
+ int delay = 0x1000;
+ iowrite32(crvalue & ~(CR_W_RXEN), ioaddr + TCRRCR);
+ while (--delay) {
+ if ( (ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP)
+ break;
+ }
+}
+
+
+static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue)
+{
+ int delay = 0x1000;
+ iowrite32(crvalue & ~(CR_W_RXEN+CR_W_TXEN), ioaddr + TCRRCR);
+ while (--delay) {
+ if ( (ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP+CR_R_TXSTOP))
+ == (CR_R_RXSTOP+CR_R_TXSTOP) )
+ break;
+ }
+}
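+
+/* stop_nic_rx()/stop_nic_rxtx() above use a bounded polling loop: write the
+   new control value, then spin a fixed number of reads waiting for the stop
+   bits, giving up quietly if they never appear. Below is a standalone
+   user-space sketch of that pattern with a faked status register (not driver
+   code):
+
+   #include <stdio.h>
+   #include <stdint.h>
+   #include <stdbool.h>
+
+   #define RXSTOP 0x00008000u
+   #define TXSTOP 0x04000000u
+
+   static uint32_t fake_status;        // stands in for the TCRRCR readback
+
+   static bool wait_for_bits(uint32_t bits, int max_polls)
+   {
+           while (max_polls--) {
+                   if ((fake_status & bits) == bits)
+                           return true;
+                   fake_status |= RXSTOP;  // pretend the NIC stops RX after a poll
+           }
+           return false;               // bounded: give up instead of hanging
+   }
+
+   int main(void)
+   {
+           fake_status = 0;
+           printf("rx stop seen: %d\n", wait_for_bits(RXSTOP, 0x1000));
+           printf("rx+tx stop seen: %d\n", wait_for_bits(RXSTOP | TXSTOP, 0x1000));
+           return 0;
+   }
+*/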
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = netdev_open,
+ .ndo_stop = netdev_close,
+ .ndo_start_xmit = start_tx,
+ .ndo_get_stats = get_stats,
+ .ndo_set_rx_mode = set_rx_mode,
+ .ndo_eth_ioctl = mii_ioctl,
+ .ndo_tx_timeout = fealnx_tx_timeout,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int fealnx_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct netdev_private *np;
+ int i, option, err, irq;
+ static int card_idx = -1;
+ char boardname[12];
+ void __iomem *ioaddr;
+ unsigned long len;
+ unsigned int chip_id = ent->driver_data;
+ struct net_device *dev;
+ void *ring_space;
+ dma_addr_t ring_dma;
+ u8 addr[ETH_ALEN];
+#ifdef USE_IO_OPS
+ int bar = 0;
+#else
+ int bar = 1;
+#endif
+
+ card_idx++;
+ sprintf(boardname, "fealnx%d", card_idx);
+
+ option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+ i = pci_enable_device(pdev);
+ if (i) return i;
+ pci_set_master(pdev);
+
+ len = pci_resource_len(pdev, bar);
+ if (len < MIN_REGION_SIZE) {
+ dev_err(&pdev->dev,
+ "region size %ld too small, aborting\n", len);
+ return -ENODEV;
+ }
+
+ i = pci_request_regions(pdev, boardname);
+ if (i)
+ return i;
+
+ irq = pdev->irq;
+
+ ioaddr = pci_iomap(pdev, bar, len);
+ if (!ioaddr) {
+ err = -ENOMEM;
+ goto err_out_res;
+ }
+
+ dev = alloc_etherdev(sizeof(struct netdev_private));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_out_unmap;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /* read ethernet id */
+ for (i = 0; i < 6; ++i)
+ addr[i] = ioread8(ioaddr + PAR0 + i);
+ eth_hw_addr_set(dev, addr);
+
+ /* Reset the chip to erase previous misconfiguration. */
+ iowrite32(0x00000001, ioaddr + BCR);
+
+ /* Make certain the descriptor lists are aligned. */
+ np = netdev_priv(dev);
+ np->mem = ioaddr;
+ spin_lock_init(&np->lock);
+ np->pci_dev = pdev;
+ np->flags = skel_netdrv_tbl[chip_id].flags;
+ pci_set_drvdata(pdev, dev);
+ np->mii.dev = dev;
+ np->mii.mdio_read = mdio_read;
+ np->mii.mdio_write = mdio_write;
+ np->mii.phy_id_mask = 0x1f;
+ np->mii.reg_num_mask = 0x1f;
+
+ ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
+ GFP_KERNEL);
+ if (!ring_space) {
+ err = -ENOMEM;
+ goto err_out_free_dev;
+ }
+ np->rx_ring = ring_space;
+ np->rx_ring_dma = ring_dma;
+
+ ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
+ GFP_KERNEL);
+ if (!ring_space) {
+ err = -ENOMEM;
+ goto err_out_free_rx;
+ }
+ np->tx_ring = ring_space;
+ np->tx_ring_dma = ring_dma;
+
+ /* find the connected MII xcvrs */
+ if (np->flags == HAS_MII_XCVR) {
+ int phy, phy_idx = 0;
+
+ for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys);
+ phy++) {
+ int mii_status = mdio_read(dev, phy, 1);
+
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ dev_info(&pdev->dev,
+ "MII PHY found at address %d, status "
+ "0x%4.4x.\n", phy, mii_status);
+ /* get phy type */
+ {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], 2);
+ if (data == SeeqPHYID0)
+ np->PHYType = SeeqPHY;
+ else if (data == AhdocPHYID0)
+ np->PHYType = AhdocPHY;
+ else if (data == MarvellPHYID0)
+ np->PHYType = MarvellPHY;
+ else if (data == MysonPHYID0)
+ np->PHYType = Myson981;
+ else if (data == LevelOnePHYID0)
+ np->PHYType = LevelOnePHY;
+ else
+ np->PHYType = OtherPHY;
+ }
+ }
+ }
+
+ np->mii_cnt = phy_idx;
+ if (phy_idx == 0)
+ dev_warn(&pdev->dev,
+ "MII PHY not found -- this device may "
+ "not operate correctly.\n");
+ } else {
+ np->phys[0] = 32;
+/* 89/6/23 add, (begin) */
+ /* get phy type */
+ if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID)
+ np->PHYType = MysonPHY;
+ else
+ np->PHYType = OtherPHY;
+ }
+ np->mii.phy_id = np->phys[0];
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ if (option & 0x200)
+ np->mii.full_duplex = 1;
+ np->default_port = option & 15;
+ }
+
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ np->mii.full_duplex = full_duplex[card_idx];
+
+ if (np->mii.full_duplex) {
+ dev_info(&pdev->dev, "Media type forced to Full Duplex.\n");
+/* 89/6/13 add, (begin) */
+// if (np->PHYType==MarvellPHY)
+ if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], 9);
+ data = (data & 0xfcff) | 0x0200;
+ mdio_write(dev, np->phys[0], 9, data);
+ }
+/* 89/6/13 add, (end) */
+ if (np->flags == HAS_MII_XCVR)
+ mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
+ else
+ iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR);
+ np->mii.force_media = 1;
+ }
+
+ dev->netdev_ops = &netdev_ops;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ err = register_netdev(dev);
+ if (err)
+ goto err_out_free_tx;
+
+ printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
+ dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr,
+ dev->dev_addr, irq);
+
+ return 0;
+
+err_out_free_tx:
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+ np->tx_ring_dma);
+err_out_free_rx:
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+ np->rx_ring_dma);
+err_out_free_dev:
+ free_netdev(dev);
+err_out_unmap:
+ pci_iounmap(pdev, ioaddr);
+err_out_res:
+ pci_release_regions(pdev);
+ return err;
+}
+
+
+static void fealnx_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct netdev_private *np = netdev_priv(dev);
+
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+ np->tx_ring_dma);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+ np->rx_ring_dma);
+ unregister_netdev(dev);
+ pci_iounmap(pdev, np->mem);
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ } else
+ printk(KERN_ERR "fealnx: remove for unknown device\n");
+}
+
+
+static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad)
+{
+ ulong miir;
+ int i;
+ unsigned int mask, data;
+
+ /* enable MII output */
+ miir = (ulong) ioread32(miiport);
+ miir &= 0xfffffff0;
+
+ miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO;
+
+ /* send 32 1's preamble */
+ for (i = 0; i < 32; i++) {
+ /* low MDC; MDO is already high (miir) */
+ miir &= ~MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+
+ /* high MDC */
+ miir |= MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+ }
+
+ /* calculate ST+OP+PHYAD+REGAD+TA */
+ data = opcode | (phyad << 7) | (regad << 2);
+
+ /* sent out */
+ mask = 0x8000;
+ while (mask) {
+ /* low MDC, prepare MDO */
+ miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
+ if (mask & data)
+ miir |= MASK_MIIR_MII_MDO;
+
+ iowrite32(miir, miiport);
+ /* high MDC */
+ miir |= MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+ udelay(30);
+
+ /* next */
+ mask >>= 1;
+ if (mask == 0x2 && opcode == OP_READ)
+ miir &= ~MASK_MIIR_MII_WRITE;
+ }
+ return miir;
+}
+
+
+static int mdio_read(struct net_device *dev, int phyad, int regad)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *miiport = np->mem + MANAGEMENT;
+ ulong miir;
+ unsigned int mask, data;
+
+ miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad);
+
+ /* read data */
+ mask = 0x8000;
+ data = 0;
+ while (mask) {
+ /* low MDC */
+ miir &= ~MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+
+ /* read MDI */
+ miir = ioread32(miiport);
+ if (miir & MASK_MIIR_MII_MDI)
+ data |= mask;
+
+ /* high MDC, and wait */
+ miir |= MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+ udelay(30);
+
+ /* next */
+ mask >>= 1;
+ }
+
+ /* low MDC */
+ miir &= ~MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+
+ return data & 0xffff;
+}
+
+
+static void mdio_write(struct net_device *dev, int phyad, int regad, int data)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *miiport = np->mem + MANAGEMENT;
+ ulong miir;
+ unsigned int mask;
+
+ miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad);
+
+ /* write data */
+ mask = 0x8000;
+ while (mask) {
+ /* low MDC, prepare MDO */
+ miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
+ if (mask & data)
+ miir |= MASK_MIIR_MII_MDO;
+ iowrite32(miir, miiport);
+
+ /* high MDC */
+ miir |= MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+
+ /* next */
+ mask >>= 1;
+ }
+
+ /* low MDC */
+ miir &= ~MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+}
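+
+/* mdio_read()/mdio_write() bit-bang the MII management frame by building a
+   16-bit word (data = opcode | (phyad << 7) | (regad << 2)) and clocking it
+   out MSB-first under the 0x8000 mask. A standalone sketch that assembles the
+   same word and prints the bit order seen on MDO (OP_READ/OP_WRITE copied
+   from the defines above; the rest is illustrative):
+
+   #include <stdio.h>
+
+   #define OP_READ  0x6000      // ST:01 OP:10 ... TA:Z0
+   #define OP_WRITE 0x5002      // ST:01 OP:01 ... TA:10
+
+   static unsigned int mii_frame(unsigned int opcode, int phyad, int regad)
+   {
+           return opcode | (phyad << 7) | (regad << 2);
+   }
+
+   static void print_msb_first(unsigned int data)
+   {
+           for (unsigned int mask = 0x8000; mask; mask >>= 1)
+                   putchar(mask & data ? '1' : '0');  // order the MDO line sees
+           putchar('\n');
+   }
+
+   int main(void)
+   {
+           print_msb_first(mii_frame(OP_READ, 3, 1));   // read BMSR of PHY 3
+           print_msb_first(mii_frame(OP_WRITE, 3, 0));  // write BMCR of PHY 3
+           return 0;
+   }
+*/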
+
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ const int irq = np->pci_dev->irq;
+ int rc, i;
+
+ iowrite32(0x00000001, ioaddr + BCR); /* Reset */
+
+ rc = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
+ if (rc)
+ return -EAGAIN;
+
+ for (i = 0; i < 3; i++)
+ iowrite16(((const unsigned short *)dev->dev_addr)[i],
+ ioaddr + PAR0 + i*2);
+
+ init_ring(dev);
+
+ iowrite32(np->rx_ring_dma, ioaddr + RXLBA);
+ iowrite32(np->tx_ring_dma, ioaddr + TXLBA);
+
+ /* Initialize other registers. */
+ /* Configure the PCI bus bursts and FIFO thresholds.
+ 486: Set 8 longword burst.
+ 586: no burst limit.
+ Burst length 5:3
+ 0 0 0 1
+ 0 0 1 4
+ 0 1 0 8
+ 0 1 1 16
+ 1 0 0 32
+ 1 0 1 64
+ 1 1 0 128
+ 1 1 1 256
+ Wait the specified 50 PCI cycles after a reset by initializing
+ Tx and Rx queues and the address filter list.
+ FIXME (Ueimor): optimistic for alpha + posted writes ? */
+
+ np->bcrvalue = 0x10; /* little-endian, 8 burst length */
+#ifdef __BIG_ENDIAN
+ np->bcrvalue |= 0x04; /* big-endian */
+#endif
+
+#if defined(__i386__) && !defined(MODULE) && !defined(CONFIG_UML)
+ if (boot_cpu_data.x86 <= 4)
+ np->crvalue = 0xa00;
+ else
+#endif
+ np->crvalue = 0xe00; /* rx 128 burst length */
+
+
+// 89/12/29 add,
+// 90/1/16 modify,
+// np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI;
+ np->imrvalue = TUNF | CNTOVF | RBU | TI | RI;
+ if (np->pci_dev->device == 0x891) {
+ np->bcrvalue |= 0x200; /* set PROG bit */
+ np->crvalue |= CR_W_ENH; /* set enhanced bit */
+ np->imrvalue |= ETI;
+ }
+ iowrite32(np->bcrvalue, ioaddr + BCR);
+
+ if (dev->if_port == 0)
+ dev->if_port = np->default_port;
+
+ iowrite32(0, ioaddr + RXPDR);
+// 89/9/1 modify,
+// np->crvalue = 0x00e40001; /* tx store and forward, tx/rx enable */
+ np->crvalue |= 0x00e40001; /* tx store and forward, tx/rx enable */
+ np->mii.full_duplex = np->mii.force_media;
+ getlinkstatus(dev);
+ if (np->linkok)
+ getlinktype(dev);
+ __set_rx_mode(dev);
+
+ netif_start_queue(dev);
+
+ /* Clear and Enable interrupts by setting the interrupt mask. */
+ iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
+ iowrite32(np->imrvalue, ioaddr + IMR);
+
+ if (debug)
+ printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
+
+ /* Set the timer to check for link beat. */
+ timer_setup(&np->timer, netdev_timer, 0);
+ np->timer.expires = RUN_AT(3 * HZ);
+
+ /* timer handler */
+ add_timer(&np->timer);
+
+ timer_setup(&np->reset_timer, reset_timer, 0);
+ np->reset_timer_armed = 0;
+ return rc;
+}
+
+
+static void getlinkstatus(struct net_device *dev)
+/* function: Routine will read MII Status Register to get link status. */
+/* input : dev... pointer to the adapter block. */
+/* output : none. */
+{
+ struct netdev_private *np = netdev_priv(dev);
+ unsigned int i, DelayTime = 0x1000;
+
+ np->linkok = 0;
+
+ if (np->PHYType == MysonPHY) {
+ for (i = 0; i < DelayTime; ++i) {
+ if (ioread32(np->mem + BMCRSR) & LinkIsUp2) {
+ np->linkok = 1;
+ return;
+ }
+ udelay(100);
+ }
+ } else {
+ for (i = 0; i < DelayTime; ++i) {
+ if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
+ np->linkok = 1;
+ return;
+ }
+ udelay(100);
+ }
+ }
+}
+
+
+static void getlinktype(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ if (np->PHYType == MysonPHY) { /* 3-in-1 case */
+ if (ioread32(np->mem + TCRRCR) & CR_R_FD)
+ np->duplexmode = 2; /* full duplex */
+ else
+ np->duplexmode = 1; /* half duplex */
+ if (ioread32(np->mem + TCRRCR) & CR_R_PS10)
+ np->line_speed = 1; /* 10M */
+ else
+ np->line_speed = 2; /* 100M */
+ } else {
+ if (np->PHYType == SeeqPHY) { /* this PHY is SEEQ 80225 */
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], MIIRegister18);
+ if (data & SPD_DET_100)
+ np->line_speed = 2; /* 100M */
+ else
+ np->line_speed = 1; /* 10M */
+ if (data & DPLX_DET_FULL)
+ np->duplexmode = 2; /* full duplex mode */
+ else
+ np->duplexmode = 1; /* half duplex mode */
+ } else if (np->PHYType == AhdocPHY) {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], DiagnosticReg);
+ if (data & Speed_100)
+ np->line_speed = 2; /* 100M */
+ else
+ np->line_speed = 1; /* 10M */
+ if (data & DPLX_FULL)
+ np->duplexmode = 2; /* full duplex mode */
+ else
+ np->duplexmode = 1; /* half duplex mode */
+ }
+/* 89/6/13 add, (begin) */
+ else if (np->PHYType == MarvellPHY) {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], SpecificReg);
+ if (data & Full_Duplex)
+ np->duplexmode = 2; /* full duplex mode */
+ else
+ np->duplexmode = 1; /* half duplex mode */
+ data &= SpeedMask;
+ if (data == Speed_1000M)
+ np->line_speed = 3; /* 1000M */
+ else if (data == Speed_100M)
+ np->line_speed = 2; /* 100M */
+ else
+ np->line_speed = 1; /* 10M */
+ }
+/* 89/6/13 add, (end) */
+/* 89/7/27 add, (begin) */
+ else if (np->PHYType == Myson981) {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], StatusRegister);
+
+ if (data & SPEED100)
+ np->line_speed = 2;
+ else
+ np->line_speed = 1;
+
+ if (data & FULLMODE)
+ np->duplexmode = 2;
+ else
+ np->duplexmode = 1;
+ }
+/* 89/7/27 add, (end) */
+/* 89/12/29 add */
+ else if (np->PHYType == LevelOnePHY) {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], SpecificReg);
+ if (data & LXT1000_Full)
+ np->duplexmode = 2; /* full duplex mode */
+ else
+ np->duplexmode = 1; /* half duplex mode */
+ data &= SpeedMask;
+ if (data == LXT1000_1000M)
+ np->line_speed = 3; /* 1000M */
+ else if (data == LXT1000_100M)
+ np->line_speed = 2; /* 100M */
+ else
+ np->line_speed = 1; /* 10M */
+ }
+ np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000);
+ if (np->line_speed == 1)
+ np->crvalue |= CR_W_PS10;
+ else if (np->line_speed == 3)
+ np->crvalue |= CR_W_PS1000;
+ if (np->duplexmode == 2)
+ np->crvalue |= CR_W_FD;
+ }
+}
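+
+/* getlinktype() reduces each PHY's status word to a line_speed/duplexmode
+   pair; for the Marvell case it masks the speed field and tests the duplex
+   bit using the constants defined earlier. A small standalone sketch of that
+   decode (the decode table and main() are illustrative, not driver code):
+
+   #include <stdio.h>
+
+   #define SpeedMask   0x0c000
+   #define Speed_1000M 0x08000
+   #define Speed_100M  0x4000
+   #define Full_Duplex 0x2000
+
+   static void decode_marvell(unsigned int reg)
+   {
+           unsigned int speed = reg & SpeedMask;
+           const char *mbps = speed == Speed_1000M ? "1000" :
+                              speed == Speed_100M  ? "100"  : "10";
+
+           printf("%s Mb/s, %s duplex\n", mbps,
+                  (reg & Full_Duplex) ? "full" : "half");
+   }
+
+   int main(void)
+   {
+           decode_marvell(Speed_1000M | Full_Duplex);  // 1000 Mb/s, full duplex
+           decode_marvell(Speed_100M);                 // 100 Mb/s, half duplex
+           return 0;
+   }
+*/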
+
+
+/* Take lock before calling this */
+static void allocate_rx_buffers(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ /* allocate skb for rx buffers */
+ while (np->really_rx_count != RX_RING_SIZE) {
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, np->rx_buf_sz);
+ if (skb == NULL)
+ break; /* Better luck next round. */
+
+ while (np->lack_rxbuf->skbuff)
+ np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
+
+ np->lack_rxbuf->skbuff = skb;
+ np->lack_rxbuf->buffer = dma_map_single(&np->pci_dev->dev,
+ skb->data,
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ np->lack_rxbuf->status = RXOWN;
+ ++np->really_rx_count;
+ }
+}
+
+
+static void netdev_timer(struct timer_list *t)
+{
+ struct netdev_private *np = from_timer(np, t, timer);
+ struct net_device *dev = np->mii.dev;
+ void __iomem *ioaddr = np->mem;
+ int old_crvalue = np->crvalue;
+ unsigned int old_linkok = np->linkok;
+ unsigned long flags;
+
+ if (debug)
+ printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
+ "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR),
+ ioread32(ioaddr + TCRRCR));
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ if (np->flags == HAS_MII_XCVR) {
+ getlinkstatus(dev);
+ if ((old_linkok == 0) && (np->linkok == 1)) { /* we need to detect the media type again */
+ getlinktype(dev);
+ if (np->crvalue != old_crvalue) {
+ stop_nic_rxtx(ioaddr, np->crvalue);
+ iowrite32(np->crvalue, ioaddr + TCRRCR);
+ }
+ }
+ }
+
+ allocate_rx_buffers(dev);
+
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ np->timer.expires = RUN_AT(10 * HZ);
+ add_timer(&np->timer);
+}
+
+
+/* Take lock before calling */
+/* Reset chip and disable rx, tx and interrupts */
+static void reset_and_disable_rxtx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ int delay=51;
+
+ /* Reset the chip's Tx and Rx processes. */
+ stop_nic_rxtx(ioaddr, 0);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ iowrite32(0, ioaddr + IMR);
+
+ /* Reset the chip to erase previous misconfiguration. */
+ iowrite32(0x00000001, ioaddr + BCR);
+
+ /* Ueimor: wait for 50 PCI cycles (and flush posted writes btw).
+ We surely wait too long (address+data phase). Who cares? */
+ while (--delay) {
+ ioread32(ioaddr + BCR);
+ rmb();
+ }
+}
+
+
+/* Take lock before calling */
+/* Restore chip after reset */
+static void enable_rxtx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+
+ reset_rx_descriptors(dev);
+
+ iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring),
+ ioaddr + TXLBA);
+ iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
+ ioaddr + RXLBA);
+
+ iowrite32(np->bcrvalue, ioaddr + BCR);
+
+ iowrite32(0, ioaddr + RXPDR);
+ __set_rx_mode(dev); /* changes np->crvalue, writes it into TCRRCR */
+
+ /* Clear and Enable interrupts by setting the interrupt mask. */
+ iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
+ iowrite32(np->imrvalue, ioaddr + IMR);
+
+ iowrite32(0, ioaddr + TXPDR);
+}
+
+
+static void reset_timer(struct timer_list *t)
+{
+ struct netdev_private *np = from_timer(np, t, reset_timer);
+ struct net_device *dev = np->mii.dev;
+ unsigned long flags;
+
+ printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name);
+
+ spin_lock_irqsave(&np->lock, flags);
+ np->crvalue = np->crvalue_sv;
+ np->imrvalue = np->imrvalue_sv;
+
+ reset_and_disable_rxtx(dev);
+ /* works for me without this:
+ reset_tx_descriptors(dev); */
+ enable_rxtx(dev);
+ netif_start_queue(dev); /* FIXME: or netif_wake_queue(dev); ? */
+
+ np->reset_timer_armed = 0;
+
+ spin_unlock_irqrestore(&np->lock, flags);
+}
+
+
+static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ unsigned long flags;
+ int i;
+
+ printk(KERN_WARNING
+ "%s: Transmit timed out, status %8.8x, resetting...\n",
+ dev->name, ioread32(ioaddr + ISR));
+
+ {
+ printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(KERN_CONT " %8.8x",
+ (unsigned int) np->rx_ring[i].status);
+ printk(KERN_CONT "\n");
+ printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(KERN_CONT " %4.4x", np->tx_ring[i].status);
+ printk(KERN_CONT "\n");
+ }
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ reset_and_disable_rxtx(dev);
+ reset_tx_descriptors(dev);
+ enable_rxtx(dev);
+
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ netif_trans_update(dev); /* prevent tx timeout */
+ dev->stats.tx_errors++;
+ netif_wake_queue(dev); /* or .._start_.. ?? */
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ /* initialize rx variables */
+ np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+ np->cur_rx = &np->rx_ring[0];
+ np->lack_rxbuf = np->rx_ring;
+ np->really_rx_count = 0;
+
+ /* initial rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
+ np->rx_ring[i].next_desc = np->rx_ring_dma +
+ (i + 1)*sizeof(struct fealnx_desc);
+ np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
+ np->rx_ring[i].skbuff = NULL;
+ }
+
+ /* for the last rx descriptor */
+ np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
+ np->rx_ring[i - 1].next_desc_logical = np->rx_ring;
+
+ /* allocate skb for rx buffers */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
+
+ if (skb == NULL) {
+ np->lack_rxbuf = &np->rx_ring[i];
+ break;
+ }
+
+ ++np->really_rx_count;
+ np->rx_ring[i].skbuff = skb;
+ np->rx_ring[i].buffer = dma_map_single(&np->pci_dev->dev,
+ skb->data,
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ np->rx_ring[i].status = RXOWN;
+ np->rx_ring[i].control |= RXIC;
+ }
+
+ /* initialize tx variables */
+ np->cur_tx = &np->tx_ring[0];
+ np->cur_tx_copy = &np->tx_ring[0];
+ np->really_tx_count = 0;
+ np->free_tx_count = TX_RING_SIZE;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_ring[i].status = 0;
+ /* do we need np->tx_ring[i].control = XXX; ?? */
+ np->tx_ring[i].next_desc = np->tx_ring_dma +
+ (i + 1)*sizeof(struct fealnx_desc);
+ np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
+ np->tx_ring[i].skbuff = NULL;
+ }
+
+ /* for the last tx descriptor */
+ np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
+ np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
+}
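+
+/* init_ring() wires the descriptors into a circle: each entry stores the bus
+   address of the next one for the NIC plus a CPU-side next_desc_logical
+   pointer, and the last entry points back to the first. A user-space sketch
+   of the same wiring, with an array offset standing in for the DMA address
+   (illustrative only):
+
+   #include <stdio.h>
+   #include <stddef.h>
+
+   #define RING_SIZE 12
+
+   struct desc {
+           size_t next_dma;            // stand-in for rx_ring_dma + offset
+           struct desc *next_logical;
+   };
+
+   int main(void)
+   {
+           static struct desc ring[RING_SIZE];
+
+           for (int i = 0; i < RING_SIZE; i++) {
+                   ring[i].next_dma = (i + 1) * sizeof(struct desc);
+                   ring[i].next_logical = &ring[i + 1];
+           }
+           // close the ring, mirroring how the driver patches the last entry
+           ring[RING_SIZE - 1].next_dma = 0;
+           ring[RING_SIZE - 1].next_logical = &ring[0];
+
+           // walking RING_SIZE hops from entry 0 lands back on entry 0
+           struct desc *cur = &ring[0];
+           for (int i = 0; i < RING_SIZE; i++)
+                   cur = cur->next_logical;
+           printf("wrapped back to start: %d\n", cur == &ring[0]);
+           return 0;
+   }
+*/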
+
+
+static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ np->cur_tx_copy->skbuff = skb;
+
+#define one_buffer
+#define BPT 1022
+#if defined(one_buffer)
+ np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
+ np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
+ np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */
+// 89/12/29 add,
+ if (np->pci_dev->device == 0x891)
+ np->cur_tx_copy->control |= ETIControl | RetryTxLC;
+ np->cur_tx_copy->status = TXOWN;
+ np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
+ --np->free_tx_count;
+#elif defined(two_buffer)
+ if (skb->len > BPT) {
+ struct fealnx_desc *next;
+
+ /* for the first descriptor */
+ np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
+ skb->data, BPT,
+ DMA_TO_DEVICE);
+ np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
+ np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
+ np->cur_tx_copy->control |= (BPT << TBSShift); /* buffer size */
+
+ /* for the last descriptor */
+ next = np->cur_tx_copy->next_desc_logical;
+ next->skbuff = skb;
+ next->control = TXIC | TXLD | CRCEnable | PADEnable;
+ next->control |= (skb->len << PKTSShift); /* pkt size */
+ next->control |= ((skb->len - BPT) << TBSShift); /* buf size */
+// 89/12/29 add,
+ if (np->pci_dev->device == 0x891)
+ np->cur_tx_copy->control |= ETIControl | RetryTxLC;
+ next->buffer = dma_map_single(&np->pci_dev->dev,
+ skb->data + BPT, skb->len - BPT,
+ DMA_TO_DEVICE);
+
+ next->status = TXOWN;
+ np->cur_tx_copy->status = TXOWN;
+
+ np->cur_tx_copy = next->next_desc_logical;
+ np->free_tx_count -= 2;
+ } else {
+ np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
+ skb->data, skb->len,
+ DMA_TO_DEVICE);
+ np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
+ np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
+ np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */
+// 89/12/29 add,
+ if (np->pci_dev->device == 0x891)
+ np->cur_tx_copy->control |= ETIControl | RetryTxLC;
+ np->cur_tx_copy->status = TXOWN;
+ np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
+ --np->free_tx_count;
+ }
+#endif
+
+ if (np->free_tx_count < 2)
+ netif_stop_queue(dev);
+ ++np->really_tx_count;
+ iowrite32(0, np->mem + TXPDR);
+
+ spin_unlock_irqrestore(&np->lock, flags);
+ return NETDEV_TX_OK;
+}
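+
+/* In the one_buffer path above, start_tx() packs the descriptor control word
+   from the TXIC/TXLD/TXFD/CRCEnable/PADEnable flags plus the packet length at
+   PKTSShift and the buffer length at TBSShift. A standalone sketch of that
+   packing using the driver's constants (the explicit masking and main() are
+   illustrative additions):
+
+   #include <stdio.h>
+   #include <stdint.h>
+
+   #define TXIC      0x80000000u
+   #define TXLD      0x20000000u
+   #define TXFD      0x10000000u
+   #define CRCEnable 0x08000000u
+   #define PADEnable 0x04000000u
+   #define PKTSMask  0x003ff800u   // packet size, bits 21..11
+   #define PKTSShift 11
+   #define TBSMask   0x000007ffu   // buffer size, bits 10..0
+   #define TBSShift  0
+
+   static uint32_t tx_control_one_buffer(uint32_t len)
+   {
+           uint32_t ctrl = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
+
+           ctrl |= (len << PKTSShift) & PKTSMask;  // whole packet size
+           ctrl |= (len << TBSShift) & TBSMask;    // this buffer's size
+           return ctrl;
+   }
+
+   int main(void)
+   {
+           uint32_t ctrl = tx_control_one_buffer(1514);
+
+           printf("control = 0x%08x\n", (unsigned)ctrl);
+           printf("pkt len = %u, buf len = %u\n",
+                  (unsigned)((ctrl & PKTSMask) >> PKTSShift),
+                  (unsigned)(ctrl & TBSMask));
+           return 0;
+   }
+*/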
+
+
+/* Take lock before calling */
+/* Chip probably hosed tx ring. Clean up. */
+static void reset_tx_descriptors(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ struct fealnx_desc *cur;
+ int i;
+
+ /* initialize tx variables */
+ np->cur_tx = &np->tx_ring[0];
+ np->cur_tx_copy = &np->tx_ring[0];
+ np->really_tx_count = 0;
+ np->free_tx_count = TX_RING_SIZE;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ cur = &np->tx_ring[i];
+ if (cur->skbuff) {
+ dma_unmap_single(&np->pci_dev->dev, cur->buffer,
+ cur->skbuff->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(cur->skbuff);
+ cur->skbuff = NULL;
+ }
+ cur->status = 0;
+ cur->control = 0; /* needed? */
+ /* probably not needed. We do it for purely paranoid reasons */
+ cur->next_desc = np->tx_ring_dma +
+ (i + 1)*sizeof(struct fealnx_desc);
+ cur->next_desc_logical = &np->tx_ring[i + 1];
+ }
+ /* for the last tx descriptor */
+ np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
+ np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
+}
+
+
+/* Take lock and stop rx before calling this */
+static void reset_rx_descriptors(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ struct fealnx_desc *cur = np->cur_rx;
+ int i;
+
+ allocate_rx_buffers(dev);
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (cur->skbuff)
+ cur->status = RXOWN;
+ cur = cur->next_desc_logical;
+ }
+
+ iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
+ np->mem + RXLBA);
+}
+
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t intr_handler(int irq, void *dev_instance)
+{
+ struct net_device *dev = (struct net_device *) dev_instance;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ long boguscnt = max_interrupt_work;
+ unsigned int num_tx = 0;
+ int handled = 0;
+
+ spin_lock(&np->lock);
+
+ iowrite32(0, ioaddr + IMR);
+
+ do {
+ u32 intr_status = ioread32(ioaddr + ISR);
+
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ iowrite32(intr_status, ioaddr + ISR);
+
+ if (debug)
+ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name,
+ intr_status);
+
+ if (!(intr_status & np->imrvalue))
+ break;
+
+ handled = 1;
+
+// 90/1/16 delete,
+//
+// if (intr_status & FBE)
+// { /* fatal error */
+// stop_nic_tx(ioaddr, 0);
+// stop_nic_rx(ioaddr, 0);
+// break;
+// };
+
+ if (intr_status & TUNF)
+ iowrite32(0, ioaddr + TXPDR);
+
+ if (intr_status & CNTOVF) {
+ /* missed pkts */
+ dev->stats.rx_missed_errors +=
+ ioread32(ioaddr + TALLY) & 0x7fff;
+
+ /* crc error */
+ dev->stats.rx_crc_errors +=
+ (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
+ }
+
+ if (intr_status & (RI | RBU)) {
+ if (intr_status & RI)
+ netdev_rx(dev);
+ else {
+ stop_nic_rx(ioaddr, np->crvalue);
+ reset_rx_descriptors(dev);
+ iowrite32(np->crvalue, ioaddr + TCRRCR);
+ }
+ }
+
+ while (np->really_tx_count) {
+ long tx_status = np->cur_tx->status;
+ long tx_control = np->cur_tx->control;
+
+ if (!(tx_control & TXLD)) { /* this pkt is combined by two tx descriptors */
+ struct fealnx_desc *next;
+
+ next = np->cur_tx->next_desc_logical;
+ tx_status = next->status;
+ tx_control = next->control;
+ }
+
+ if (tx_status & TXOWN)
+ break;
+
+ if (!(np->crvalue & CR_W_ENH)) {
+ if (tx_status & (CSL | LC | EC | UDF | HF)) {
+ dev->stats.tx_errors++;
+ if (tx_status & EC)
+ dev->stats.tx_aborted_errors++;
+ if (tx_status & CSL)
+ dev->stats.tx_carrier_errors++;
+ if (tx_status & LC)
+ dev->stats.tx_window_errors++;
+ if (tx_status & UDF)
+ dev->stats.tx_fifo_errors++;
+ if ((tx_status & HF) && np->mii.full_duplex == 0)
+ dev->stats.tx_heartbeat_errors++;
+
+ } else {
+ dev->stats.tx_bytes +=
+ ((tx_control & PKTSMask) >> PKTSShift);
+
+ dev->stats.collisions +=
+ ((tx_status & NCRMask) >> NCRShift);
+ dev->stats.tx_packets++;
+ }
+ } else {
+ dev->stats.tx_bytes +=
+ ((tx_control & PKTSMask) >> PKTSShift);
+ dev->stats.tx_packets++;
+ }
+
+ /* Free the original skb. */
+ dma_unmap_single(&np->pci_dev->dev,
+ np->cur_tx->buffer,
+ np->cur_tx->skbuff->len,
+ DMA_TO_DEVICE);
+ dev_consume_skb_irq(np->cur_tx->skbuff);
+ np->cur_tx->skbuff = NULL;
+ --np->really_tx_count;
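+ /* Advance one descriptor for a single-descriptor packet (TXLD set), two for a split packet. */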
+ if (np->cur_tx->control & TXLD) {
+ np->cur_tx = np->cur_tx->next_desc_logical;
+ ++np->free_tx_count;
+ } else {
+ np->cur_tx = np->cur_tx->next_desc_logical;
+ np->cur_tx = np->cur_tx->next_desc_logical;
+ np->free_tx_count += 2;
+ }
+ num_tx++;
+ } /* end of tx-completion while loop */
+
+ if (num_tx && np->free_tx_count >= 2)
+ netif_wake_queue(dev);
+
+ /* read transmit status for enhanced mode only */
+ if (np->crvalue & CR_W_ENH) {
+ long data;
+
+ data = ioread32(ioaddr + TSR);
+ dev->stats.tx_errors += (data & 0xff000000) >> 24;
+ dev->stats.tx_aborted_errors +=
+ (data & 0xff000000) >> 24;
+ dev->stats.tx_window_errors +=
+ (data & 0x00ff0000) >> 16;
+ dev->stats.collisions += (data & 0x0000ffff);
+ }
+
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x.\n", dev->name, intr_status);
+ if (!np->reset_timer_armed) {
+ np->reset_timer_armed = 1;
+ np->reset_timer.expires = RUN_AT(HZ/2);
+ add_timer(&np->reset_timer);
+ stop_nic_rxtx(ioaddr, 0);
+ netif_stop_queue(dev);
+ /* or netif_tx_disable(dev); ?? */
+ /* Prevent other paths from re-enabling tx, rx and interrupts */
+ np->crvalue_sv = np->crvalue;
+ np->imrvalue_sv = np->imrvalue;
+ np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN); /* or simply = 0? */
+ np->imrvalue = 0;
+ }
+
+ break;
+ }
+ } while (1);
+
+ /* read the tally counters */
+ /* missed pkts */
+ dev->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
+
+ /* crc error */
+ dev->stats.rx_crc_errors +=
+ (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
+
+ if (debug)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, ioread32(ioaddr + ISR));
+
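+ /* Re-enable interrupts by restoring the current interrupt mask. */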
+ iowrite32(np->imrvalue, ioaddr + IMR);
+
+ spin_unlock(&np->lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static int netdev_rx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+
+ /* Process descriptors the NIC has handed back (RXOWN clear); send each packet up. */
+ while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) {
+ s32 rx_status = np->cur_rx->status;
+
+ if (np->really_rx_count == 0)
+ break;
+
+ if (debug)
+ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n", rx_status);
+
+ if ((!((rx_status & RXFSD) && (rx_status & RXLSD))) ||
+ (rx_status & ErrorSummary)) {
+ if (rx_status & ErrorSummary) { /* there was a fatal error */
+ if (debug)
+ printk(KERN_DEBUG
+ "%s: Receive error, Rx status %8.8x.\n",
+ dev->name, rx_status);
+
+ dev->stats.rx_errors++; /* end of a packet. */
+ if (rx_status & (LONGPKT | RUNTPKT))
+ dev->stats.rx_length_errors++;
+ if (rx_status & RXER)
+ dev->stats.rx_frame_errors++;
+ if (rx_status & CRC)
+ dev->stats.rx_crc_errors++;
+ } else {
+ int need_to_reset = 0;
+ int desno = 0;
+
+ if (rx_status & RXFSD) { /* this packet is too long, spanning more than one rx buffer */
+ struct fealnx_desc *cur;
+
+ /* check whether this packet has been received completely */
+ cur = np->cur_rx;
+ while (desno <= np->really_rx_count) {
+ ++desno;
+ if ((!(cur->status & RXOWN)) &&
+ (cur->status & RXLSD))
+ break;
+ /* goto next rx descriptor */
+ cur = cur->next_desc_logical;
+ }
+ if (desno > np->really_rx_count)
+ need_to_reset = 1;
+ } else /* RXLSD not found, something is wrong */
+ need_to_reset = 1;
+
+ if (need_to_reset == 0) {
+ int i;
+
+ dev->stats.rx_length_errors++;
+
+ /* free all rx descriptors related to this long packet */
+ for (i = 0; i < desno; ++i) {
+ if (!np->cur_rx->skbuff) {
+ printk(KERN_DEBUG
+ "%s: I'm scared\n", dev->name);
+ break;
+ }
+ np->cur_rx->status = RXOWN;
+ np->cur_rx = np->cur_rx->next_desc_logical;
+ }
+ continue;
+ } else { /* rx error, need to reset this chip */
+ stop_nic_rx(ioaddr, np->crvalue);
+ reset_rx_descriptors(dev);
+ iowrite32(np->crvalue, ioaddr + TCRRCR);
+ }
+ break; /* exit the while loop */
+ }
+ } else { /* this received pkt is ok */
+
+ struct sk_buff *skb;
+ /* Omit the four octet CRC from the length. */
+ short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;
+
+#ifndef final_version
+ if (debug)
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
+ " status %x.\n", pkt_len, rx_status);
+#endif
+
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak &&
+ (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ dma_sync_single_for_cpu(&np->pci_dev->dev,
+ np->cur_rx->buffer,
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ /* Call copy + cksum if available. */
+
+#if ! defined(__alpha__)
+ skb_copy_to_linear_data(skb,
+ np->cur_rx->skbuff->data, pkt_len);
+ skb_put(skb, pkt_len);
+#else
+ skb_put_data(skb, np->cur_rx->skbuff->data,
+ pkt_len);
+#endif
+ dma_sync_single_for_device(&np->pci_dev->dev,
+ np->cur_rx->buffer,
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ } else {
+ dma_unmap_single(&np->pci_dev->dev,
+ np->cur_rx->buffer,
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ skb_put(skb = np->cur_rx->skbuff, pkt_len);
+ np->cur_rx->skbuff = NULL;
+ --np->really_rx_count;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+
+ np->cur_rx = np->cur_rx->next_desc_logical;
+ } /* end of while loop */
+
+ /* allocate skb for rx buffers */
+ allocate_rx_buffers(dev);
+
+ return 0;
+}
+
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+
+ /* The chip only needs to report frames it silently dropped. */
+ if (netif_running(dev)) {
+ dev->stats.rx_missed_errors +=
+ ioread32(ioaddr + TALLY) & 0x7fff;
+ dev->stats.rx_crc_errors +=
+ (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
+ }
+
+ return &dev->stats;
+}
+
+
+/* for dev->set_multicast_list */
+static void set_rx_mode(struct net_device *dev)
+{
+ spinlock_t *lp = &((struct netdev_private *)netdev_priv(dev))->lock;
+ unsigned long flags;
+ spin_lock_irqsave(lp, flags);
+ __set_rx_mode(dev);
+ spin_unlock_irqrestore(lp, flags);
+}
+
+
+/* Take lock before calling */
+static void __set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ u32 rx_mode;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = CR_W_AB | CR_W_AM;
+ } else {
+ struct netdev_hw_addr *ha;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned int bit;
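+ /* Index into the 64-bit multicast hash (MAR0/MAR1): top six CRC bits, inverted. */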
+ bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
+ mc_filter[bit >> 5] |= (1 << bit);
+ }
+ rx_mode = CR_W_AB | CR_W_AM;
+ }
+
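+ /* Briefly stop rx/tx while the hash filter and rx mode bits are updated. */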
+ stop_nic_rxtx(ioaddr, np->crvalue);
+
+ iowrite32(mc_filter[0], ioaddr + MAR0);
+ iowrite32(mc_filter[1], ioaddr + MAR1);
+ np->crvalue &= ~CR_W_RXMODEMASK;
+ np->crvalue |= rx_mode;
+ iowrite32(np->crvalue, ioaddr + TCRRCR);
+}
+
+static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+}
+
+static int netdev_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ spin_lock_irq(&np->lock);
+ mii_ethtool_get_link_ksettings(&np->mii, cmd);
+ spin_unlock_irq(&np->lock);
+
+ return 0;
+}
+
+static int netdev_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&np->lock);
+ rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
+ spin_unlock_irq(&np->lock);
+
+ return rc;
+}
+
+static int netdev_nway_reset(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_nway_restart(&np->mii);
+}
+
+static u32 netdev_get_link(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_link_ok(&np->mii);
+}
+
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 value)
+{
+ debug = value;
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .nway_reset = netdev_nway_reset,
+ .get_link = netdev_get_link,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+ .get_link_ksettings = netdev_get_link_ksettings,
+ .set_link_ksettings = netdev_set_link_ksettings,
+};
+
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int rc;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irq(&np->lock);
+ rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
+ spin_unlock_irq(&np->lock);
+
+ return rc;
+}
+
+
+static int netdev_close(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ int i;
+
+ netif_stop_queue(dev);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ iowrite32(0x0000, ioaddr + IMR);
+
+ /* Stop the chip's Tx and Rx processes. */
+ stop_nic_rxtx(ioaddr, 0);
+
+ del_timer_sync(&np->timer);
+ del_timer_sync(&np->reset_timer);
+
+ free_irq(np->pci_dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = np->rx_ring[i].skbuff;
+
+ np->rx_ring[i].status = 0;
+ if (skb) {
+ dma_unmap_single(&np->pci_dev->dev,
+ np->rx_ring[i].buffer, np->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ np->rx_ring[i].skbuff = NULL;
+ }
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ struct sk_buff *skb = np->tx_ring[i].skbuff;
+
+ if (skb) {
+ dma_unmap_single(&np->pci_dev->dev,
+ np->tx_ring[i].buffer, skb->len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb(skb);
+ np->tx_ring[i].skbuff = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static const struct pci_device_id fealnx_pci_tbl[] = {
+ {0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
+ {0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
+ {} /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);
+
+
+static struct pci_driver fealnx_driver = {
+ .name = "fealnx",
+ .id_table = fealnx_pci_tbl,
+ .probe = fealnx_init_one,
+ .remove = fealnx_remove_one,
+};
+
+module_pci_driver(fealnx_driver);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index bca68edfbe9c..da9d4b310fcd 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -370,8 +370,7 @@ static const struct ethtool_rmon_hist_range enetc_rmon_ranges[] = {
};
static void enetc_rmon_stats(struct enetc_hw *hw, int mac,
- struct ethtool_rmon_stats *s,
- const struct ethtool_rmon_hist_range **ranges)
+ struct ethtool_rmon_stats *s)
{
s->undersize_pkts = enetc_port_rd(hw, ENETC_PM_RUND(mac));
s->oversize_pkts = enetc_port_rd(hw, ENETC_PM_ROVR(mac));
@@ -393,8 +392,6 @@ static void enetc_rmon_stats(struct enetc_hw *hw, int mac,
s->hist_tx[4] = enetc_port_rd(hw, ENETC_PM_T1023(mac));
s->hist_tx[5] = enetc_port_rd(hw, ENETC_PM_T1522(mac));
s->hist_tx[6] = enetc_port_rd(hw, ENETC_PM_T1523X(mac));
-
- *ranges = enetc_rmon_ranges;
}
static void enetc_get_eth_mac_stats(struct net_device *ndev,
@@ -447,13 +444,15 @@ static void enetc_get_rmon_stats(struct net_device *ndev,
struct enetc_hw *hw = &priv->si->hw;
struct enetc_si *si = priv->si;
+ *ranges = enetc_rmon_ranges;
+
switch (rmon_stats->src) {
case ETHTOOL_MAC_STATS_SRC_EMAC:
- enetc_rmon_stats(hw, 0, rmon_stats, ranges);
+ enetc_rmon_stats(hw, 0, rmon_stats);
break;
case ETHTOOL_MAC_STATS_SRC_PMAC:
if (si->hw_features & ENETC_SI_F_QBU)
- enetc_rmon_stats(hw, 1, rmon_stats, ranges);
+ enetc_rmon_stats(hw, 1, rmon_stats);
break;
case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
ethtool_aggregate_rmon_stats(ndev, rmon_stats);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index c73e25f8995e..f3b16a6673e2 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -4251,7 +4251,7 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_ipc_init;
- if (of_get_property(np, "fsl,magic-packet", NULL))
+ if (of_property_read_bool(np, "fsl,magic-packet"))
fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
ret = fec_enet_init_stop_mode(fep, np);
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index a7f4c3c29f3e..b88816b71ddf 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -937,7 +937,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* the 7-wire property means don't use MII mode */
- if (of_find_property(np, "fsl,7-wire-mode", NULL)) {
+ if (of_property_read_bool(np, "fsl,7-wire-mode")) {
priv->seven_wire_mode = 1;
dev_info(&ndev->dev, "using 7-wire PHY mode\n");
}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index b2def295523a..38d5013c6fed 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -787,10 +787,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
else
priv->interface = gfar_get_interface(dev);
- if (of_find_property(np, "fsl,magic-packet", NULL))
+ if (of_property_read_bool(np, "fsl,magic-packet"))
priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
- if (of_get_property(np, "fsl,wake-on-filer", NULL))
+ if (of_property_read_bool(np, "fsl,wake-on-filer"))
priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index ce574d097e28..5f81470843b4 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -537,7 +537,10 @@ static int gve_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct gve_priv *priv = netdev_priv(netdev);
- int err = gve_adminq_report_link_speed(priv);
+ int err = 0;
+
+ if (priv->link_speed == 0)
+ err = gve_adminq_report_link_speed(priv);
cmd->base.speed = priv->link_speed;
return err;
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index daec9ce04531..54bb4d9a0d1e 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -78,6 +78,7 @@ static int sni_82596_probe(struct platform_device *dev)
void __iomem *mpu_addr;
void __iomem *ca_addr;
u8 __iomem *eth_addr;
+ u8 mac[ETH_ALEN];
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
ca = platform_get_resource(dev, IORESOURCE_MEM, 1);
@@ -109,12 +110,13 @@ static int sni_82596_probe(struct platform_device *dev)
goto probe_failed;
/* someone seems to like messed up stuff */
- netdevice->dev_addr[0] = readb(eth_addr + 0x0b);
- netdevice->dev_addr[1] = readb(eth_addr + 0x0a);
- netdevice->dev_addr[2] = readb(eth_addr + 0x09);
- netdevice->dev_addr[3] = readb(eth_addr + 0x08);
- netdevice->dev_addr[4] = readb(eth_addr + 0x07);
- netdevice->dev_addr[5] = readb(eth_addr + 0x06);
+ mac[0] = readb(eth_addr + 0x0b);
+ mac[1] = readb(eth_addr + 0x0a);
+ mac[2] = readb(eth_addr + 0x09);
+ mac[3] = readb(eth_addr + 0x08);
+ mac[4] = readb(eth_addr + 0x07);
+ mac[5] = readb(eth_addr + 0x06);
+ eth_hw_addr_set(netdevice, mac);
iounmap(eth_addr);
if (netdevice->irq < 0) {
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 9b08e41ccc29..c97095abd26a 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2939,9 +2939,9 @@ static int emac_init_config(struct emac_instance *dev)
}
/* Fixup some feature bits based on the device tree */
- if (of_get_property(np, "has-inverted-stacr-oc", NULL))
+ if (of_property_read_bool(np, "has-inverted-stacr-oc"))
dev->features |= EMAC_FTR_STACR_OC_INVERT;
- if (of_get_property(np, "has-new-stacr-staopc", NULL))
+ if (of_property_read_bool(np, "has-new-stacr-staopc"))
dev->features |= EMAC_FTR_HAS_NEW_STACR;
/* CAB lacks the appropriate properties */
@@ -3042,7 +3042,7 @@ static int emac_probe(struct platform_device *ofdev)
* property here for now, but new flat device trees should set a
* status property to "disabled" instead.
*/
- if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
+ if (of_property_read_bool(np, "unused") || !of_device_is_available(np))
return -ENODEV;
/* Find ourselves in the bootlist if we are there */
@@ -3333,7 +3333,7 @@ static void __init emac_make_bootlist(void)
if (of_match_node(emac_match, np) == NULL)
continue;
- if (of_get_property(np, "unused", NULL))
+ if (of_property_read_bool(np, "unused"))
continue;
idx = of_get_property(np, "cell-index", NULL);
if (idx == NULL)
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index 242ef976fd15..50358cf00130 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -242,7 +242,7 @@ static int rgmii_probe(struct platform_device *ofdev)
}
/* Check for RGMII flags */
- if (of_get_property(ofdev->dev.of_node, "has-mdio", NULL))
+ if (of_property_read_bool(ofdev->dev.of_node, "has-mdio"))
dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO;
/* CAB lacks the right properties, fix this up */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 467001db5070..228cd502bb48 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -15525,6 +15525,7 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
int err;
int v_idx;
+ pci_set_drvdata(pf->pdev, pf);
pci_save_state(pf->pdev);
/* set up periodic task facility */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 924f972b91fa..72b091f2509d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -171,10 +171,10 @@ static char *i40e_create_dummy_packet(u8 *dummy_packet, bool ipv4, u8 l4proto,
struct i40e_fdir_filter *data)
{
bool is_vlan = !!data->vlan_tag;
- struct vlan_hdr vlan;
- struct ipv6hdr ipv6;
- struct ethhdr eth;
- struct iphdr ip;
+ struct vlan_hdr vlan = {};
+ struct ipv6hdr ipv6 = {};
+ struct ethhdr eth = {};
+ struct iphdr ip = {};
u8 *tmp;
if (ipv4) {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index 16c490965b61..dd11dbbd5551 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -661,7 +661,7 @@ struct iavf_rx_ptype_decoded iavf_ptype_lookup[BIT(8)] = {
/* Non Tunneled IPv6 */
IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
IAVF_PTT_UNUSED_ENTRY(91),
IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 3273aeb8fa67..095201e83c9d 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -893,6 +893,10 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
+ /* Do not track the VLAN 0 filter; it is always added by the PF on VF init */
+ if (!vid)
+ return 0;
+
if (!VLAN_FILTERING_ALLOWED(adapter))
return -EIO;
@@ -919,6 +923,10 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
+ /* We do not track VLAN 0 filter */
+ if (!vid)
+ return 0;
+
iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
if (proto == cpu_to_be16(ETH_P_8021Q))
clear_bit(vid, adapter->vsi.active_cvlans);
@@ -5066,6 +5074,11 @@ static void iavf_remove(struct pci_dev *pdev)
mutex_unlock(&adapter->crit_lock);
break;
}
+ /* Simply return if we already went through iavf_shutdown */
+ if (adapter->state == __IAVF_REMOVE) {
+ mutex_unlock(&adapter->crit_lock);
+ return;
+ }
mutex_unlock(&adapter->crit_lock);
usleep_range(500, 1000);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 18b6a702a1d6..e989feda133c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1096,7 +1096,7 @@ static inline void iavf_rx_hash(struct iavf_ring *ring,
cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
- if (ring->netdev->features & NETIF_F_RXHASH)
+ if (!(ring->netdev->features & NETIF_F_RXHASH))
return;
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 6d23338604bb..4e17d006c52d 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -2446,8 +2446,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->is_new_vlan) {
f->is_new_vlan = false;
- if (!f->vlan.vid)
- continue;
if (f->vlan.tpid == ETH_P_8021Q)
set_bit(f->vlan.vid,
adapter->vsi.active_cvlans);
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index b0e29e342401..e809249500e1 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -509,6 +509,7 @@ enum ice_pf_flags {
ICE_FLAG_VF_VLAN_PRUNING,
ICE_FLAG_LINK_LENIENT_MODE_ENA,
ICE_FLAG_PLUG_AUX_DEV,
+ ICE_FLAG_UNPLUG_AUX_DEV,
ICE_FLAG_MTU_CHANGED,
ICE_FLAG_GNSS, /* GNSS successfully initialized */
ICE_PF_FLAGS_NBITS /* must be last */
@@ -955,16 +956,11 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
*/
static inline void ice_clear_rdma_cap(struct ice_pf *pf)
{
- /* We can directly unplug aux device here only if the flag bit
- * ICE_FLAG_PLUG_AUX_DEV is not set because ice_unplug_aux_dev()
- * could race with ice_plug_aux_dev() called from
- * ice_service_task(). In this case we only clear that bit now and
- * aux device will be unplugged later once ice_plug_aux_device()
- * called from ice_service_task() finishes (see ice_service_task()).
+ /* Defer the unplug to the service task to avoid the RTNL lock, and
+ * clear the PLUG bit so that a pending plug request doesn't interfere.
+ */
- if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
- ice_unplug_aux_dev(pf);
-
+ clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
+ set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags);
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index c557dfc50aad..396e555023aa 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -1411,7 +1411,7 @@ ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
tlv->ouisubtype = htonl(ouisubtype);
buf[0] = dcbcfg->pfc.pfccap & 0xF;
- buf[1] = dcbcfg->pfc.pfcena & 0xF;
+ buf[1] = dcbcfg->pfc.pfcena;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index b360bd8f1599..f86e814354a3 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -4331,6 +4331,8 @@ ice_get_module_eeprom(struct net_device *netdev,
* SFP modules only ever use page 0.
*/
if (page == 0 || !(data[0x2] & 0x4)) {
+ u32 copy_len;
+
/* If i2c bus is busy due to slow page change or
* link management access, call can fail. This is normal.
* So we retry this a few times.
@@ -4354,8 +4356,8 @@ ice_get_module_eeprom(struct net_device *netdev,
}
/* Make sure we have enough room for the new block */
- if ((i + SFF_READ_BLOCK_SIZE) < ee->len)
- memcpy(data + i, value, SFF_READ_BLOCK_SIZE);
+ copy_len = min_t(u32, SFF_READ_BLOCK_SIZE, ee->len - i);
+ memcpy(data + i, value, copy_len);
}
}
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 781475480ff2..450317dfcca7 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -291,6 +291,7 @@ static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)
struct ice_vsi_ctx *ctxt;
int status;
+ ice_fltr_remove_all(vsi);
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return;
@@ -2126,7 +2127,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
ice_for_each_rxq(vsi, i)
ice_tx_xsk_pool(vsi, i);
- return ret;
+ return 0;
}
/**
@@ -2693,12 +2694,14 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
return ret;
/* allocate memory for Tx/Rx ring stat pointers */
- if (ice_vsi_alloc_stat_arrays(vsi))
+ ret = ice_vsi_alloc_stat_arrays(vsi);
+ if (ret)
goto unroll_vsi_alloc;
ice_alloc_fd_res(vsi);
- if (ice_vsi_get_qs(vsi)) {
+ ret = ice_vsi_get_qs(vsi);
+ if (ret) {
dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
vsi->idx);
goto unroll_vsi_alloc_stat;
@@ -2811,6 +2814,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
break;
default:
/* clean up the resources and exit */
+ ret = -EINVAL;
goto unroll_vsi_init;
}
@@ -2889,7 +2893,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
ice_cfg_sw_lldp(vsi, false, false);
- ice_fltr_remove_all(vsi);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
if (err)
@@ -3508,10 +3511,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
if (vsi_flags & ICE_VSI_FLAG_INIT) {
ret = -EIO;
goto err_vsi_cfg_tc_lan;
- } else {
- kfree(coalesce);
- return ice_schedule_reset(pf, ICE_RESET_PFR);
}
+
+ kfree(coalesce);
+ return ice_schedule_reset(pf, ICE_RESET_PFR);
}
ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq);
@@ -3759,7 +3762,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
dev = ice_pf_to_dev(pf);
if (vsi->tc_cfg.ena_tc == ena_tc &&
vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
- return ret;
+ return 0;
ice_for_each_traffic_class(i) {
/* build bitmap of enabled TCs */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 567694bf098b..0d8b8c6f9bd3 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2316,18 +2316,15 @@ static void ice_service_task(struct work_struct *work)
}
}
- if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) {
- /* Plug aux device per request */
- ice_plug_aux_dev(pf);
+ /* Unplug the aux dev per request. If an unplug request came in
+ * while a plug request was being processed, this handles it.
+ */
+ if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
+ ice_unplug_aux_dev(pf);
- /* Mark plugging as done but check whether unplug was
- * requested during ice_plug_aux_dev() call
- * (e.g. from ice_clear_rdma_cap()) and if so then
- * plug aux device.
- */
- if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
- ice_unplug_aux_dev(pf);
- }
+ /* Plug aux device per request */
+ if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+ ice_plug_aux_dev(pf);
if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
struct iidc_event *event;
@@ -4644,6 +4641,12 @@ static int ice_start_eth(struct ice_vsi *vsi)
return err;
}
+static void ice_stop_eth(struct ice_vsi *vsi)
+{
+ ice_fltr_remove_all(vsi);
+ ice_vsi_close(vsi);
+}
+
static int ice_init_eth(struct ice_pf *pf)
{
struct ice_vsi *vsi = ice_get_main_vsi(pf);
@@ -5132,7 +5135,7 @@ void ice_unload(struct ice_pf *pf)
{
ice_deinit_features(pf);
ice_deinit_rdma(pf);
- ice_vsi_close(ice_get_main_vsi(pf));
+ ice_stop_eth(ice_get_main_vsi(pf));
ice_vsi_decfg(ice_get_main_vsi(pf));
ice_deinit_dev(pf);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 96a64c25e2ef..0cc05e54a781 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -1341,15 +1341,15 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
struct ice_vf *vf;
int ret;
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return -EINVAL;
+
if (ice_is_eswitch_mode_switchdev(pf)) {
dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
return -EOPNOTSUPP;
}
- vf = ice_get_vf_by_id(pf, vf_id);
- if (!vf)
- return -EINVAL;
-
ret = ice_check_vf_ready_for_cfg(vf);
if (ret)
goto out_put_vf;
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 6b48cbc049c6..76f29a5bf8d7 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -1455,8 +1455,8 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
if (match.mask->vlan_priority) {
fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_PRIO;
headers->vlan_hdr.vlan_prio =
- cpu_to_be16((match.key->vlan_priority <<
- VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
+ be16_encode_bits(match.key->vlan_priority,
+ VLAN_PRIO_MASK);
}
if (match.mask->vlan_tpid)
@@ -1489,8 +1489,8 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
if (match.mask->vlan_priority) {
fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN_PRIO;
headers->cvlan_hdr.vlan_prio =
- cpu_to_be16((match.key->vlan_priority <<
- VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
+ be16_encode_bits(match.key->vlan_priority,
+ VLAN_PRIO_MASK);
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index dfd22862e926..b61dd9f01540 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1210,6 +1210,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
if (++ntc == cnt)
ntc = 0;
+ rx_ring->first_desc = ntc;
continue;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 31565bbafa22..d1e489da7363 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -184,8 +184,6 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
}
netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
- ice_qvec_dis_irq(vsi, rx_ring, q_vector);
-
ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
if (err)
@@ -200,10 +198,11 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
if (err)
return err;
}
+ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+
err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
if (err)
return err;
- ice_clean_rx_ring(rx_ring);
ice_qvec_toggle_napi(vsi, q_vector, false);
ice_qp_clean_rings(vsi, q_idx);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 03bc1e8af575..274c781b5547 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -109,6 +109,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void igb_remove(struct pci_dev *pdev);
+static void igb_init_queue_configuration(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
int igb_open(struct net_device *);
int igb_close(struct net_device *);
@@ -175,9 +176,7 @@ static void igb_nfc_filter_restore(struct igb_adapter *adapter);
#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
-static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
-static int igb_disable_sriov(struct pci_dev *dev);
-static int igb_pci_disable_sriov(struct pci_dev *dev);
+static int igb_disable_sriov(struct pci_dev *dev, bool reinit);
#endif
static int igb_suspend(struct device *);
@@ -3665,7 +3664,7 @@ err_sw_init:
kfree(adapter->shadow_vfta);
igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PCI_IOV
- igb_disable_sriov(pdev);
+ igb_disable_sriov(pdev, false);
#endif
pci_iounmap(pdev, adapter->io_addr);
err_ioremap:
@@ -3679,7 +3678,38 @@ err_dma:
}
#ifdef CONFIG_PCI_IOV
-static int igb_disable_sriov(struct pci_dev *pdev)
+static int igb_sriov_reinit(struct pci_dev *dev)
+{
+ struct net_device *netdev = pci_get_drvdata(dev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+
+ rtnl_lock();
+
+ if (netif_running(netdev))
+ igb_close(netdev);
+ else
+ igb_reset(adapter);
+
+ igb_clear_interrupt_scheme(adapter);
+
+ igb_init_queue_configuration(adapter);
+
+ if (igb_init_interrupt_scheme(adapter, true)) {
+ rtnl_unlock();
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ if (netif_running(netdev))
+ igb_open(netdev);
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int igb_disable_sriov(struct pci_dev *pdev, bool reinit)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -3713,10 +3743,10 @@ static int igb_disable_sriov(struct pci_dev *pdev)
adapter->flags |= IGB_FLAG_DMAC;
}
- return 0;
+ return reinit ? igb_sriov_reinit(pdev) : 0;
}
-static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
+static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs, bool reinit)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -3781,12 +3811,6 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
"Unable to allocate memory for VF MAC filter list\n");
}
- /* only call pci_enable_sriov() if no VFs are allocated already */
- if (!old_vfs) {
- err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
- if (err)
- goto err_out;
- }
dev_info(&pdev->dev, "%d VFs allocated\n",
adapter->vfs_allocated_count);
for (i = 0; i < adapter->vfs_allocated_count; i++)
@@ -3794,6 +3818,17 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
/* DMA Coalescing is not supported in IOV mode. */
adapter->flags &= ~IGB_FLAG_DMAC;
+
+ if (reinit) {
+ err = igb_sriov_reinit(pdev);
+ if (err)
+ goto err_out;
+ }
+
+ /* only call pci_enable_sriov() if no VFs are allocated already */
+ if (!old_vfs)
+ err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+
goto out;
err_out:
@@ -3863,9 +3898,7 @@ static void igb_remove(struct pci_dev *pdev)
igb_release_hw_control(adapter);
#ifdef CONFIG_PCI_IOV
- rtnl_lock();
- igb_disable_sriov(pdev);
- rtnl_unlock();
+ igb_disable_sriov(pdev, false);
#endif
unregister_netdev(netdev);
@@ -3911,7 +3944,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
igb_reset_interrupt_capability(adapter);
pci_sriov_set_totalvfs(pdev, 7);
- igb_enable_sriov(pdev, max_vfs);
+ igb_enable_sriov(pdev, max_vfs, false);
#endif /* CONFIG_PCI_IOV */
}
@@ -9520,71 +9553,17 @@ static void igb_shutdown(struct pci_dev *pdev)
}
}
-#ifdef CONFIG_PCI_IOV
-static int igb_sriov_reinit(struct pci_dev *dev)
-{
- struct net_device *netdev = pci_get_drvdata(dev);
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
-
- rtnl_lock();
-
- if (netif_running(netdev))
- igb_close(netdev);
- else
- igb_reset(adapter);
-
- igb_clear_interrupt_scheme(adapter);
-
- igb_init_queue_configuration(adapter);
-
- if (igb_init_interrupt_scheme(adapter, true)) {
- rtnl_unlock();
- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
- return -ENOMEM;
- }
-
- if (netif_running(netdev))
- igb_open(netdev);
-
- rtnl_unlock();
-
- return 0;
-}
-
-static int igb_pci_disable_sriov(struct pci_dev *dev)
-{
- int err = igb_disable_sriov(dev);
-
- if (!err)
- err = igb_sriov_reinit(dev);
-
- return err;
-}
-
-static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
-{
- int err = igb_enable_sriov(dev, num_vfs);
-
- if (err)
- goto out;
-
- err = igb_sriov_reinit(dev);
- if (!err)
- return num_vfs;
-
-out:
- return err;
-}
-
-#endif
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
- if (num_vfs == 0)
- return igb_pci_disable_sriov(dev);
- else
- return igb_pci_enable_sriov(dev, num_vfs);
+ int err;
+
+ if (num_vfs == 0) {
+ return igb_disable_sriov(dev, true);
+ } else {
+ err = igb_enable_sriov(dev, num_vfs, true);
+ return err ? err : num_vfs;
+ }
#endif
return 0;
}
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 3a32809510fc..72cb1b56e9f2 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1074,7 +1074,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
netdev);
if (err)
- goto out;
+ goto free_irq_tx;
adapter->rx_ring->itr_register = E1000_EITR(vector);
adapter->rx_ring->itr_val = adapter->current_itr;
@@ -1083,10 +1083,14 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
err = request_irq(adapter->msix_entries[vector].vector,
igbvf_msix_other, 0, netdev->name, netdev);
if (err)
- goto out;
+ goto free_irq_rx;
igbvf_configure_msix(adapter);
return 0;
+free_irq_rx:
+ free_irq(adapter->msix_entries[--vector].vector, netdev);
+free_irq_tx:
+ free_irq(adapter->msix_entries[--vector].vector, netdev);
out:
return err;
}
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index b8ba3f94c363..a47a2e3e548c 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2009 - 2018 Intel Corporation. */
+#include <linux/etherdevice.h>
+
#include "vf.h"
static s32 e1000_check_for_link_vf(struct e1000_hw *hw);
@@ -131,11 +133,16 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
/* set our "perm_addr" based on info provided by PF */
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
if (!ret_val) {
- if (msgbuf[0] == (E1000_VF_RESET |
- E1000_VT_MSGTYPE_ACK))
+ switch (msgbuf[0]) {
+ case E1000_VF_RESET | E1000_VT_MSGTYPE_ACK:
memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
- else
+ break;
+ case E1000_VF_RESET | E1000_VT_MSGTYPE_NACK:
+ eth_zero_addr(hw->mac.perm_addr);
+ break;
+ default:
ret_val = -E1000_ERR_MAC_INIT;
+ }
}
}
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 2928a6c73692..25fc6c65209b 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -6010,18 +6010,18 @@ static bool validate_schedule(struct igc_adapter *adapter,
if (e->command != TC_TAPRIO_CMD_SET_GATES)
return false;
- for (i = 0; i < adapter->num_tx_queues; i++) {
- if (e->gate_mask & BIT(i))
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ if (e->gate_mask & BIT(i)) {
queue_uses[i]++;
- /* There are limitations: A single queue cannot be
- * opened and closed multiple times per cycle unless the
- * gate stays open. Check for it.
- */
- if (queue_uses[i] > 1 &&
- !(prev->gate_mask & BIT(i)))
- return false;
- }
+ /* There are limitations: A single queue cannot
+ * be opened and closed multiple times per cycle
+ * unless the gate stays open. Check for it.
+ */
+ if (queue_uses[i] > 1 &&
+ !(prev->gate_mask & BIT(i)))
+ return false;
+ }
}
return true;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 9b4ecbe4f36d..3ea00bc9b91c 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4996,6 +4996,14 @@ static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
for (i = 0; i < priv->port_count; i++) {
port = priv->port_list[i];
+ if (percpu && port->ntxqs >= num_possible_cpus() * 2)
+ xdp_set_features_flag(port->dev,
+ NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT);
+ else
+ xdp_clear_features_flag(port->dev);
+
mvpp2_swf_bm_pool_init(port);
if (status[i])
mvpp2_open(port->dev);
@@ -6863,13 +6871,14 @@ static int mvpp2_port_probe(struct platform_device *pdev,
if (!port->priv->percpu_pools)
mvpp2_set_hw_csum(port, port->pool_long->id);
+ else if (port->ntxqs >= num_possible_cpus() * 2)
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
dev->vlan_features |= features;
netif_set_tso_max_segs(dev, MVPP2_MAX_TSO_SEGS);
- dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
- NETDEV_XDP_ACT_NDO_XMIT;
-
dev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 68 - 9704 */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 389663a13d1d..ef721caeac49 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -884,6 +884,9 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
int rvu_cpt_init(struct rvu *rvu);
+#define NDC_AF_BANK_MASK GENMASK_ULL(7, 0)
+#define NDC_AF_BANK_LINE_MASK GENMASK_ULL(31, 16)
+
/* CN10K RVU */
int rvu_set_channels_base(struct rvu *rvu);
void rvu_program_channels(struct rvu *rvu);
@@ -902,6 +905,8 @@ static inline void rvu_dbg_init(struct rvu *rvu) {}
static inline void rvu_dbg_exit(struct rvu *rvu) {}
#endif
+int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr);
+
/* RVU Switch */
void rvu_switch_enable(struct rvu *rvu);
void rvu_switch_disable(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index fa280ebd3052..26cfa501f1a1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -198,9 +198,6 @@ enum cpt_eng_type {
CPT_IE_TYPE = 3,
};
-#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
- blk_addr, NDC_AF_CONST) & 0xFF)
-
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL
@@ -1448,6 +1445,7 @@ static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
struct nix_hw *nix_hw;
struct rvu *rvu;
int bank, max_bank;
+ u64 ndc_af_const;
if (blk_addr == BLKADDR_NDC_NPA0) {
rvu = s->private;
@@ -1456,7 +1454,8 @@ static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
rvu = nix_hw->rvu;
}
- max_bank = NDC_MAX_BANK(rvu, blk_addr);
+ ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
+ max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
for (bank = 0; bank < max_bank; bank++) {
seq_printf(s, "BANK:%d\n", bank);
seq_printf(s, "\tHits:\t%lld\n",
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 26e639e57dae..4ad707e758b9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -790,6 +790,7 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
struct nix_aq_res_s *result;
int timeout = 1000;
u64 reg, head;
+ int ret;
result = (struct nix_aq_res_s *)aq->res->base;
@@ -813,9 +814,22 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
return -EBUSY;
}
- if (result->compcode != NIX_AQ_COMP_GOOD)
+ if (result->compcode != NIX_AQ_COMP_GOOD) {
/* TODO: Replace this with some error code */
+ if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
+ result->compcode == NIX_AQ_COMP_LOCKERR ||
+ result->compcode == NIX_AQ_COMP_CTX_POISON) {
+ ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
+ ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
+ ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
+ ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
+ if (ret)
+ dev_err(rvu->dev,
+ "%s: Not able to unlock cachelines\n", __func__);
+ }
+
return -EBUSY;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index 70bd036ed76e..4f5ca5ab13a4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -4,7 +4,7 @@
* Copyright (C) 2018 Marvell.
*
*/
-
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -42,9 +42,18 @@ static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
return -EBUSY;
}
- if (result->compcode != NPA_AQ_COMP_GOOD)
+ if (result->compcode != NPA_AQ_COMP_GOOD) {
/* TODO: Replace this with some error code */
+ if (result->compcode == NPA_AQ_COMP_CTX_FAULT ||
+ result->compcode == NPA_AQ_COMP_LOCKERR ||
+ result->compcode == NPA_AQ_COMP_CTX_POISON) {
+ if (rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NPA0))
+ dev_err(rvu->dev,
+ "%s: Not able to unlock cachelines\n", __func__);
+ }
+
return -EBUSY;
+ }
return 0;
}
@@ -545,3 +554,48 @@ void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
npa_ctx_free(rvu, pfvf);
}
+
+/* Due to a hardware erratum, in some corner cases, AQ context lock
+ * operations can leave an NDC way in an illegal state where it is
+ * locked but not valid.
+ *
+ * This API solves the problem by clearing the lock bit of the NDC block.
+ * The operation needs to be done for each line of all the NDC banks.
+ */
+int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr)
+{
+ int bank, max_bank, line, max_line, err;
+ u64 reg, ndc_af_const;
+
+ /* Set the ENABLE bit(63) to '0' */
+ reg = rvu_read64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL);
+ rvu_write64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, reg & GENMASK_ULL(62, 0));
+
+ /* Poll until the BUSY bits(47:32) are set to '0' */
+ err = rvu_poll_reg(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, GENMASK_ULL(47, 32), true);
+ if (err) {
+ dev_err(rvu->dev, "Timed out while polling for NDC CAM busy bits.\n");
+ return err;
+ }
+
+ ndc_af_const = rvu_read64(rvu, blkaddr, NDC_AF_CONST);
+ max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
+ max_line = FIELD_GET(NDC_AF_BANK_LINE_MASK, ndc_af_const);
+ for (bank = 0; bank < max_bank; bank++) {
+ for (line = 0; line < max_line; line++) {
+ /* Check if 'cache line valid bit(63)' is not set
+ * but 'cache line lock bit(60)' is set and on
+ * success, reset the lock bit(60).
+ */
+ reg = rvu_read64(rvu, blkaddr,
+ NDC_AF_BANKX_LINEX_METADATA(bank, line));
+ if (!(reg & BIT_ULL(63)) && (reg & BIT_ULL(60))) {
+ rvu_write64(rvu, blkaddr,
+ NDC_AF_BANKX_LINEX_METADATA(bank, line),
+ reg & ~BIT_ULL(60));
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 1729b22580ce..7007f0b8e659 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -694,6 +694,7 @@
#define NDC_AF_INTR_ENA_W1S (0x00068)
#define NDC_AF_INTR_ENA_W1C (0x00070)
#define NDC_AF_ACTIVE_PC (0x00078)
+#define NDC_AF_CAMS_RD_INTERVAL (0x00080)
#define NDC_AF_BP_TEST_ENABLE (0x001F8)
#define NDC_AF_BP_TEST(a) (0x00200 | (a) << 3)
#define NDC_AF_BLK_RST (0x002F0)
@@ -709,6 +710,8 @@
(0x00F00 | (a) << 5 | (b) << 4)
#define NDC_AF_BANKX_HIT_PC(a) (0x01000 | (a) << 3)
#define NDC_AF_BANKX_MISS_PC(a) (0x01100 | (a) << 3)
+#define NDC_AF_BANKX_LINEX_METADATA(a, b) \
+ (0x10000 | (a) << 12 | (b) << 3)
/* LBK */
#define LBK_CONST (0x10ull)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 7f8ffbf79cf7..ab126f8706c7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -709,6 +709,7 @@ err_unreg_netdev:
err_ptp_destroy:
otx2_ptp_destroy(vf);
err_detach_rsrc:
+ free_percpu(vf->hw.lmt_info);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
qmem_free(vf->dev, vf->dync_lmt);
otx2_detach_resources(&vf->mbox);
@@ -762,6 +763,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
otx2_shutdown_tc(vf);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
+ free_percpu(vf->hw.lmt_info);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
qmem_free(vf->dev, vf->dync_lmt);
otx2vf_vfaf_mbox_destroy(vf);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 14be6ea51b88..3cb43623d3db 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -616,7 +616,8 @@ static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
mcr_new = mcr_cur;
mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
- MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
+ MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
+ MAC_MCR_RX_FIFO_CLR_DIS;
/* Only update control register when needed! */
if (mcr_new != mcr_cur)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index afc9d52e79bf..084a6badef6d 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -397,6 +397,7 @@
#define MAC_MCR_FORCE_MODE BIT(15)
#define MAC_MCR_TX_EN BIT(14)
#define MAC_MCR_RX_EN BIT(13)
+#define MAC_MCR_RX_FIFO_CLR_DIS BIT(12)
#define MAC_MCR_BACKOFF_EN BIT(9)
#define MAC_MCR_BACKPR_EN BIT(8)
#define MAC_MCR_FORCE_RX_FC BIT(5)
@@ -541,6 +542,10 @@
#define SGMII_SEND_AN_ERROR_EN BIT(11)
#define SGMII_IF_MODE_MASK GENMASK(5, 1)
+/* Register to reset SGMII design */
+#define SGMII_RESERVED_0 0x34
+#define SGMII_SW_RESET BIT(0)
+
/* Register to set SGMII speed, ANA RG_ Control Signals III*/
#define SGMSYS_ANA_RG_CS3 0x2028
#define RG_PHY_SPEED_MASK (BIT(2) | BIT(3))
diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
index bb00de1003ac..83976dc86887 100644
--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -38,20 +38,16 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
const unsigned long *advertising,
bool permit_pause_to_mac)
{
+ bool mode_changed = false, changed, use_an;
struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
unsigned int rgc3, sgm_mode, bmcr;
int advertise, link_timer;
- bool changed, use_an;
advertise = phylink_mii_c22_pcs_encode_advertisement(interface,
advertising);
if (advertise < 0)
return advertise;
- link_timer = phylink_get_link_timer_ns(interface);
- if (link_timer < 0)
- return link_timer;
-
/* Clearing IF_MODE_BIT0 switches the PCS to BASE-X mode, and
* we assume that fixes it's speed at bitrate = line rate (in
* other words, 1000Mbps or 2500Mbps).
@@ -77,17 +73,24 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
}
if (use_an) {
- /* FIXME: Do we need to set AN_RESTART here? */
- bmcr = SGMII_AN_RESTART | SGMII_AN_ENABLE;
+ bmcr = SGMII_AN_ENABLE;
} else {
bmcr = 0;
}
if (mpcs->interface != interface) {
+ link_timer = phylink_get_link_timer_ns(interface);
+ if (link_timer < 0)
+ return link_timer;
+
/* PHYA power down */
regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
SGMII_PHYA_PWD, SGMII_PHYA_PWD);
+ /* Reset SGMII PCS state */
+ regmap_update_bits(mpcs->regmap, SGMII_RESERVED_0,
+ SGMII_SW_RESET, SGMII_SW_RESET);
+
if (interface == PHY_INTERFACE_MODE_2500BASEX)
rgc3 = RG_PHY_SPEED_3_125G;
else
@@ -97,16 +100,17 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
RG_PHY_SPEED_3_125G, rgc3);
+ /* Setup the link timer */
+ regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, link_timer / 2 / 8);
+
mpcs->interface = interface;
+ mode_changed = true;
}
/* Update the advertisement, noting whether it has changed */
regmap_update_bits_check(mpcs->regmap, SGMSYS_PCS_ADVERTISE,
SGMII_ADVERTISE, advertise, &changed);
- /* Setup the link timer and QPHY power up inside SGMIISYS */
- regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, link_timer / 2 / 8);
-
/* Update the sgmsys mode register */
regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
SGMII_REMOTE_FAULT_DIS | SGMII_SPEED_DUPLEX_AN |
@@ -114,7 +118,7 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
/* Update the BMCR */
regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1,
- SGMII_AN_RESTART | SGMII_AN_ENABLE, bmcr);
+ SGMII_AN_ENABLE, bmcr);
/* Release PHYA power down state
* Only removing bit SGMII_PHYA_PWD isn't enough.
@@ -128,7 +132,7 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
usleep_range(50, 100);
regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, 0);
- return changed;
+ return changed || mode_changed;
}
static void mtk_pcs_restart_an(struct phylink_pcs *pcs)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 0869d4fff17b..4b5e459b6d49 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -674,7 +674,7 @@ int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
if (unlikely(_ctx->ring->hwtstamp_rx_filter != HWTSTAMP_FILTER_ALL))
- return -EOPNOTSUPP;
+ return -ENODATA;
*timestamp = mlx4_en_get_hwtstamp(_ctx->mdev,
mlx4_en_get_cqe_ts(_ctx->cqe));
@@ -686,7 +686,7 @@ int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
if (unlikely(!(_ctx->dev->features & NETIF_F_RXHASH)))
- return -EOPNOTSUPP;
+ return -ENODATA;
*hash = be32_to_cpu(_ctx->cqe->immed_rss_invalid);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 88460b7796e5..4a19ef4a9811 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -313,7 +313,6 @@ struct mlx5e_params {
} channel;
} mqprio;
bool rx_cqe_compress_def;
- bool tunneled_offload_en;
struct dim_cq_moder rx_cq_moderation;
struct dim_cq_moder tx_cq_moderation;
struct mlx5e_packet_merge_param packet_merge;
@@ -1243,6 +1242,7 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);
+void mlx5e_set_xdp_feature(struct net_device *netdev);
netdev_features_t mlx5e_features_check(struct sk_buff *skb,
struct net_device *netdev,
netdev_features_t features);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
index c4378afdec09..1bd1c94fb977 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
@@ -178,7 +178,6 @@ tc_act_police_stats(struct mlx5e_priv *priv,
meter = mlx5e_tc_meter_get(priv->mdev, &params);
if (IS_ERR(meter)) {
NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter");
- mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index);
return PTR_ERR(meter);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c
index 626cb7470fa5..07c1895a2b23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c
@@ -64,6 +64,7 @@ mlx5e_tc_act_stats_add(struct mlx5e_tc_act_stats_handle *handle,
{
struct mlx5e_tc_act_stats *act_stats, *old_act_stats;
struct rhashtable *ht = &handle->ht;
+ u64 lastused;
int err = 0;
act_stats = kvzalloc(sizeof(*act_stats), GFP_KERNEL);
@@ -73,6 +74,10 @@ mlx5e_tc_act_stats_add(struct mlx5e_tc_act_stats_handle *handle,
act_stats->tc_act_cookie = act_cookie;
act_stats->counter = counter;
+ mlx5_fc_query_cached_raw(counter,
+ &act_stats->lastbytes,
+ &act_stats->lastpackets, &lastused);
+
rcu_read_lock();
old_act_stats = rhashtable_lookup_get_insert_fast(ht,
&act_stats->hash,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index bcd6370de440..c5dae48b7932 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -162,7 +162,7 @@ static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->tstamp)))
- return -EOPNOTSUPP;
+ return -ENODATA;
*timestamp = mlx5e_cqe_ts_to_ns(_ctx->rq->ptp_cyc2time,
_ctx->rq->clock, get_cqe_ts(_ctx->cqe));
@@ -174,7 +174,7 @@ static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
if (unlikely(!(_ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)))
- return -EOPNOTSUPP;
+ return -ENODATA;
*hash = be32_to_cpu(_ctx->cqe->rss_hash_result);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 4be770443b0c..9b597cb24598 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -621,15 +621,6 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
if (unlikely(!priv_rx))
return -ENOMEM;
- dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
- if (IS_ERR(dek)) {
- err = PTR_ERR(dek);
- goto err_create_key;
- }
- priv_rx->dek = dek;
-
- INIT_LIST_HEAD(&priv_rx->list);
- spin_lock_init(&priv_rx->lock);
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128:
priv_rx->crypto_info.crypto_info_128 =
@@ -642,9 +633,20 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
default:
WARN_ONCE(1, "Unsupported cipher type %u\n",
crypto_info->cipher_type);
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto err_cipher_type;
}
+ dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
+ if (IS_ERR(dek)) {
+ err = PTR_ERR(dek);
+ goto err_cipher_type;
+ }
+ priv_rx->dek = dek;
+
+ INIT_LIST_HEAD(&priv_rx->list);
+ spin_lock_init(&priv_rx->lock);
+
rxq = mlx5e_ktls_sk_get_rxq(sk);
priv_rx->rxq = rxq;
priv_rx->sk = sk;
@@ -677,7 +679,7 @@ err_post_wqes:
mlx5e_tir_destroy(&priv_rx->tir);
err_create_tir:
mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_rx->dek);
-err_create_key:
+err_cipher_type:
kfree(priv_rx);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 60b3e08a1028..0e4c0a093293 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -469,14 +469,6 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
if (IS_ERR(priv_tx))
return PTR_ERR(priv_tx);
- dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
- if (IS_ERR(dek)) {
- err = PTR_ERR(dek);
- goto err_create_key;
- }
- priv_tx->dek = dek;
-
- priv_tx->expected_seq = start_offload_tcp_sn;
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128:
priv_tx->crypto_info.crypto_info_128 =
@@ -489,8 +481,18 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
default:
WARN_ONCE(1, "Unsupported cipher type %u\n",
crypto_info->cipher_type);
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto err_pool_push;
}
+
+ dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
+ if (IS_ERR(dek)) {
+ err = PTR_ERR(dek);
+ goto err_pool_push;
+ }
+
+ priv_tx->dek = dek;
+ priv_tx->expected_seq = start_offload_tcp_sn;
priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);
mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);
@@ -500,7 +502,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
return 0;
-err_create_key:
+err_pool_push:
pool_push(pool, priv_tx);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index 08d0929e8260..33b3620ea45c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -89,8 +89,8 @@ struct mlx5e_macsec_rx_sc {
};
struct mlx5e_macsec_umr {
+ u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
dma_addr_t dma_addr;
- u8 ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
u32 mkey;
};
@@ -1412,6 +1412,7 @@ static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *mac
struct mlx5e_macsec_aso *aso;
struct mlx5_aso_wqe *aso_wqe;
struct mlx5_aso *maso;
+ unsigned long expires;
int err;
aso = &macsec->aso;
@@ -1425,7 +1426,13 @@ static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *mac
macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);
mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
- err = mlx5_aso_poll_cq(maso, false);
+ expires = jiffies + msecs_to_jiffies(10);
+ do {
+ err = mlx5_aso_poll_cq(maso, false);
+ if (err)
+ usleep_range(2, 10);
+ } while (err && time_is_after_jiffies(expires));
+
if (err)
goto err_out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 2449731b7d79..89de92d06483 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -117,12 +117,14 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
if (!MLX5_CAP_GEN(priv->mdev, ets))
return -EOPNOTSUPP;
- ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
- for (i = 0; i < ets->ets_cap; i++) {
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
if (err)
return err;
+ }
+ ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
+ for (i = 0; i < ets->ets_cap; i++) {
err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 7708acc9b2ab..79fd21ecb9cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1985,6 +1985,7 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_params new_params;
+ int err;
if (enable) {
/* Checking the regular RQ here; mlx5e_validate_xsk_param called
@@ -2005,7 +2006,14 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_STRIDING_RQ, enable);
mlx5e_set_rq_type(mdev, &new_params);
- return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
+ err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
+ if (err)
+ return err;
+
+ /* update XDP supported features */
+ mlx5e_set_xdp_feature(netdev);
+
+ return 0;
}
static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 76a9c5194a70..7ca7e9b57607 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4004,6 +4004,25 @@ static int mlx5e_handle_feature(struct net_device *netdev,
return 0;
}
+void mlx5e_set_xdp_feature(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_params *params = &priv->channels.params;
+ xdp_features_t val;
+
+ if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
+ xdp_clear_features_flag(netdev);
+ return;
+ }
+
+ val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY |
+ NETDEV_XDP_ACT_NDO_XMIT;
+ if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
+ val |= NETDEV_XDP_ACT_RX_SG;
+ xdp_set_features_flag(netdev, val);
+}
+
int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
{
netdev_features_t oper_features = features;
@@ -4030,6 +4049,9 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
return -EINVAL;
}
+ /* update XDP supported features */
+ mlx5e_set_xdp_feature(netdev);
+
return 0;
}
@@ -4128,8 +4150,12 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
}
}
- if (mlx5e_is_uplink_rep(priv))
+ if (mlx5e_is_uplink_rep(priv)) {
features = mlx5e_fix_uplink_rep_features(netdev, features);
+ features |= NETIF_F_NETNS_LOCAL;
+ } else {
+ features &= ~NETIF_F_NETNS_LOCAL;
+ }
mutex_unlock(&priv->state_lock);
@@ -4147,13 +4173,17 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
struct xsk_buff_pool *xsk_pool =
mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix);
struct mlx5e_xsk_param xsk;
+ int max_xdp_mtu;
if (!xsk_pool)
continue;
mlx5e_build_xsk_param(xsk_pool, &xsk);
+ max_xdp_mtu = mlx5e_xdp_max_mtu(new_params, &xsk);
- if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) {
+ /* Validate XSK params and XDP MTU in advance */
+ if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev) ||
+ new_params->sw_mtu > max_xdp_mtu) {
u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk);
int max_mtu_frame, max_mtu_page, max_mtu;
@@ -4163,9 +4193,9 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
*/
max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr);
max_mtu_page = MLX5E_HW2SW_MTU(new_params, SKB_MAX_HEAD(0));
- max_mtu = min(max_mtu_frame, max_mtu_page);
+ max_mtu = min3(max_mtu_frame, max_mtu_page, max_xdp_mtu);
- netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u. Try MTU <= %d\n",
+ netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u or its redirection XDP program. Try MTU <= %d\n",
new_params->sw_mtu, ix, max_mtu);
return false;
}
@@ -4761,13 +4791,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
if (old_prog)
bpf_prog_put(old_prog);
- if (reset) {
- if (prog)
- xdp_features_set_redirect_target(netdev, true);
- else
- xdp_features_clear_redirect_target(netdev);
- }
-
if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
goto unlock;
@@ -4964,8 +4987,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
/* TX inline */
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
- params->tunneled_offload_en = mlx5_tunnel_inner_ft_supported(mdev);
-
/* AF_XDP */
params->xsk = xsk;
@@ -5163,13 +5184,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->features |= NETIF_F_HIGHDMA;
netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
- netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
- NETDEV_XDP_ACT_XSK_ZEROCOPY |
- NETDEV_XDP_ACT_RX_SG;
-
netdev->priv_flags |= IFF_UNICAST_FLT;
netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
+ mlx5e_set_xdp_feature(netdev);
mlx5e_set_netdev_dev_addr(netdev);
mlx5e_macsec_build_netdev(priv);
mlx5e_ipsec_build_netdev(priv);
@@ -5241,6 +5259,9 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
mlx5e_health_create_reporters(priv);
+ /* update XDP supported features */
+ mlx5e_set_xdp_feature(netdev);
+
return 0;
}
@@ -5270,7 +5291,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
}
features = MLX5E_RX_RES_FEATURE_PTP;
- if (priv->channels.params.tunneled_offload_en)
+ if (mlx5_tunnel_inner_ft_supported(mdev))
features |= MLX5E_RX_RES_FEATURE_INNER_FT;
err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
priv->max_nch, priv->drop_rq.rqn,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 9b9203443085..8ff654b4e9e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -747,12 +747,14 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
/* RQ */
mlx5e_build_rq_params(mdev, params);
+ /* update XDP supported features */
+ mlx5e_set_xdp_feature(netdev);
+
/* CQ moderation params */
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
params->mqprio.num_tc = 1;
- params->tunneled_offload_en = false;
if (rep->vport != MLX5_VPORT_UPLINK)
params->vlan_strip_disable = true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 70b8d2dfa751..87a2850b32d0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1103,8 +1103,8 @@ static void
mlx5e_hairpin_params_init(struct mlx5e_hairpin_params *hairpin_params,
struct mlx5_core_dev *mdev)
{
+ u32 link_speed = 0;
u64 link_speed64;
- u32 link_speed;
hairpin_params->mdev = mdev;
/* set hairpin pair per each 50Gbs share of the link */
@@ -3752,7 +3752,7 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
parse_attr->filter_dev = attr->parse_attr->filter_dev;
attr2->action = 0;
attr2->counter = NULL;
- attr->tc_act_cookies_count = 0;
+ attr2->tc_act_cookies_count = 0;
attr2->flags = 0;
attr2->parse_attr = parse_attr;
attr2->dest_chain = 0;
@@ -4304,6 +4304,7 @@ int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
esw_attr->dest_int_port = dest_int_port;
esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
+ esw_attr->split_count = out_index;
/* Forward to root fdb for matching against the new source vport */
attr->dest_chain = 0;
@@ -5304,8 +5305,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
mlx5e_tc_debugfs_init(tc, mlx5e_fs_get_debugfs_root(priv->fs));
tc->action_stats_handle = mlx5e_tc_act_stats_create();
- if (IS_ERR(tc->action_stats_handle))
+ if (IS_ERR(tc->action_stats_handle)) {
+ err = PTR_ERR(tc->action_stats_handle);
goto err_act_stats;
+ }
return 0;
@@ -5440,8 +5443,10 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
}
uplink_priv->action_stats_handle = mlx5e_tc_act_stats_create();
- if (IS_ERR(uplink_priv->action_stats_handle))
+ if (IS_ERR(uplink_priv->action_stats_handle)) {
+ err = PTR_ERR(uplink_priv->action_stats_handle);
goto err_action_counter;
+ }
return 0;
@@ -5463,6 +5468,16 @@ err_tun_mapping:
void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
{
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5_eswitch *esw;
+ struct mlx5e_priv *priv;
+
+ rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
+ priv = netdev_priv(rpriv->netdev);
+ esw = priv->mdev->priv.eswitch;
+
+ mlx5e_tc_clean_fdb_peer_flows(esw);
+
mlx5e_tc_tun_cleanup(uplink_priv->encap);
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
index d55775627a47..50d2ea323979 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
@@ -364,8 +364,7 @@ int mlx5_esw_acl_ingress_vport_metadata_update(struct mlx5_eswitch *esw, u16 vpo
if (WARN_ON_ONCE(IS_ERR(vport))) {
esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
- err = PTR_ERR(vport);
- goto out;
+ return PTR_ERR(vport);
}
esw_acl_ingress_ofld_rules_destroy(esw, vport);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 0f052513fefa..8bdf28762f41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -959,6 +959,7 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
*/
esw_vport_change_handle_locked(vport);
vport->enabled_events = 0;
+ esw_apply_vport_rx_mode(esw, vport, false, false);
esw_vport_cleanup(esw, vport);
esw->enabled_vports--;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index d766a64b1823..25a8076a77bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -723,11 +723,11 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
for (i = 0; i < esw_attr->split_count; i++) {
- if (esw_is_indir_table(esw, attr))
- err = esw_setup_indir_table(dest, &flow_act, esw, attr, false, &i);
- else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
- err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
- &i);
+ if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
+ /* Source port rewrite (forward to ovs internal port or stack device) isn't
+ * supported in rules with a split action.
+ */
+ err = -EOPNOTSUPP;
else
esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);
@@ -3405,6 +3405,18 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
return 0;
}
+static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
+{
+ struct net *devl_net, *netdev_net;
+ struct mlx5_eswitch *esw;
+
+ esw = mlx5_devlink_eswitch_get(devlink);
+ netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev);
+ devl_net = devlink_net(devlink);
+
+ return net_eq(devl_net, netdev_net);
+}
+
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
@@ -3419,6 +3431,13 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
+ !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
+ return -EPERM;
+ }
+
mlx5_lag_disable_change(esw->dev);
err = mlx5_esw_try_lock(esw);
if (err < 0) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index c2a4f86bc890..baa7ef812313 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -70,7 +70,6 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
params->packet_merge.type = MLX5E_PACKET_MERGE_NONE;
params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
- params->tunneled_offload_en = false;
/* CQE compression is not supported for IPoIB */
params->rx_cqe_compress_def = false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 540840e80493..f1de152a6113 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1364,8 +1364,8 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
{
mlx5_devlink_traps_unregister(priv_to_devlink(dev));
mlx5_sf_dev_table_destroy(dev);
- mlx5_sriov_detach(dev);
mlx5_eswitch_disable(dev->priv.eswitch);
+ mlx5_sriov_detach(dev);
mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
mlx5_sf_hw_table_destroy(dev);
@@ -1789,11 +1789,11 @@ static void remove_one(struct pci_dev *pdev)
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct devlink *devlink = priv_to_devlink(dev);
+ set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
/* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain
* fw_reset before unregistering the devlink.
*/
mlx5_drain_fw_reset(dev);
- set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
devlink_unregister(devlink);
mlx5_sriov_disable(pdev);
mlx5_crdump_disable(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 64d4e7125e9b..95dc67fb3001 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -82,6 +82,16 @@ static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_funct
return func_id <= mlx5_core_max_vfs(dev) ? MLX5_VF : MLX5_SF;
}
+static u32 mlx5_get_ec_function(u32 function)
+{
+ return function >> 16;
+}
+
+static u32 mlx5_get_func_id(u32 function)
+{
+ return function & 0xffff;
+}
+
static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
struct rb_root *root;
@@ -665,20 +675,22 @@ static int optimal_reclaimed_pages(void)
}
static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
- struct rb_root *root, u16 func_id)
+ struct rb_root *root, u32 function)
{
u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES));
unsigned long end = jiffies + recl_pages_to_jiffies;
while (!RB_EMPTY_ROOT(root)) {
+ u32 ec_function = mlx5_get_ec_function(function);
+ u32 function_id = mlx5_get_func_id(function);
int nclaimed;
int err;
- err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
- &nclaimed, false, mlx5_core_is_ecpf(dev));
+ err = reclaim_pages(dev, function_id, optimal_reclaimed_pages(),
+ &nclaimed, false, ec_function);
if (err) {
- mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
- err, func_id);
+ mlx5_core_warn(dev, "reclaim_pages err (%d) func_id=0x%x ec_func=0x%x\n",
+ err, function_id, ec_function);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index c5240d38c9db..09ed6e5fa6c3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -105,7 +105,6 @@ struct mlxsw_thermal {
struct thermal_zone_device *tzdev;
int polling_delay;
struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
- u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1];
struct thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
struct mlxsw_cooling_states cooling_states[MLXSW_THERMAL_NUM_TRIPS];
struct mlxsw_thermal_area line_cards[];
@@ -468,7 +467,7 @@ static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev,
return idx;
/* Normalize the state to the valid speed range. */
- state = thermal->cooling_levels[state];
+ state = max_t(unsigned long, MLXSW_THERMAL_MIN_STATE, state);
mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state));
err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
if (err) {
@@ -859,10 +858,6 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
}
}
- /* Initialize cooling levels per PWM state. */
- for (i = 0; i < MLXSW_THERMAL_MAX_STATE; i++)
- thermal->cooling_levels[i] = max(MLXSW_THERMAL_MIN_STATE, i);
-
thermal->polling_delay = bus_info->low_frequency ?
MLXSW_THERMAL_SLOW_POLL_INT :
MLXSW_THERMAL_POLL_INT;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index a8f94b7544ee..02a327744a61 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2937,6 +2937,7 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
{
+ refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 0);
mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
mutex_init(&mlxsw_sp->parsing.lock);
@@ -2945,6 +2946,7 @@ static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
{
mutex_destroy(&mlxsw_sp->parsing.lock);
+ WARN_ON_ONCE(refcount_read(&mlxsw_sp->parsing.parsing_depth_ref));
}
struct mlxsw_sp_ipv6_addr_node {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 045a24cacfa5..b6ee2d658b0c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -1354,7 +1354,7 @@ static int mlxsw_sp_fid_8021q_port_vid_map(struct mlxsw_sp_fid *fid,
u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
int err;
/* In case there are no {Port, VID} => FID mappings on the port,
@@ -1391,7 +1391,7 @@ mlxsw_sp_fid_8021q_port_vid_unmap(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 09e32778b012..4a73e2fe95ef 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -10381,11 +10381,23 @@ err_reg_write:
old_inc_parsing_depth);
return err;
}
+
+static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ bool old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
+
+ mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, old_inc_parsing_depth,
+ false);
+}
#else
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
return 0;
}
+
+static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
+{
+}
#endif
static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
@@ -10615,6 +10627,7 @@ err_register_inet6addr_notifier:
err_register_inetaddr_notifier:
mlxsw_core_flush_owq();
err_dscp_init:
+ mlxsw_sp_mp_hash_fini(mlxsw_sp);
err_mp_hash_init:
mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
@@ -10655,6 +10668,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
mlxsw_core_flush_owq();
+ mlxsw_sp_mp_hash_fini(mlxsw_sp);
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_lb_rif_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_police.c b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
index a9aec900d608..7d66fe75cd3b 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
@@ -194,7 +194,7 @@ int lan966x_police_port_del(struct lan966x_port *port,
return -EINVAL;
}
- err = lan966x_police_del(port, port->tc.police_id);
+ err = lan966x_police_del(port, POL_IDX_PORT + port->chip_port);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed to add policer to port");
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c b/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
index 871a3e62f852..2d763664dcda 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
@@ -249,6 +249,21 @@ static int sparx5_dcb_ieee_dscp_setdel(struct net_device *dev,
return 0;
}
+static int sparx5_dcb_ieee_delapp(struct net_device *dev, struct dcb_app *app)
+{
+ int err;
+
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ err = sparx5_dcb_ieee_dscp_setdel(dev, app, dcb_ieee_delapp);
+ else
+ err = dcb_ieee_delapp(dev, app);
+
+ if (err < 0)
+ return err;
+
+ return sparx5_dcb_app_update(dev);
+}
+
static int sparx5_dcb_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
struct dcb_app app_itr;
@@ -264,7 +279,7 @@ static int sparx5_dcb_ieee_setapp(struct net_device *dev, struct dcb_app *app)
if (prio) {
app_itr = *app;
app_itr.priority = prio;
- dcb_ieee_delapp(dev, &app_itr);
+ sparx5_dcb_ieee_delapp(dev, &app_itr);
}
if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
@@ -281,21 +296,6 @@ out:
return err;
}
-static int sparx5_dcb_ieee_delapp(struct net_device *dev, struct dcb_app *app)
-{
- int err;
-
- if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
- err = sparx5_dcb_ieee_dscp_setdel(dev, app, dcb_ieee_delapp);
- else
- err = dcb_ieee_delapp(dev, app);
-
- if (err < 0)
- return err;
-
- return sparx5_dcb_app_update(dev);
-}
-
static int sparx5_dcb_setapptrust(struct net_device *dev, u8 *selectors,
int nselectors)
{
diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
index bdb893476832..d0e6cd8dbe5c 100644
--- a/drivers/net/ethernet/mscc/ocelot_stats.c
+++ b/drivers/net/ethernet/mscc/ocelot_stats.c
@@ -258,6 +258,7 @@ struct ocelot_stat_layout {
struct ocelot_stats_region {
struct list_head node;
u32 base;
+ enum ocelot_stat first_stat;
int count;
u32 *buf;
};
@@ -273,6 +274,7 @@ static const struct ocelot_stat_layout ocelot_mm_stats_layout[OCELOT_NUM_STATS]
OCELOT_STAT(RX_ASSEMBLY_OK),
OCELOT_STAT(RX_MERGE_FRAGMENTS),
OCELOT_STAT(TX_MERGE_FRAGMENTS),
+ OCELOT_STAT(TX_MM_HOLD),
OCELOT_STAT(RX_PMAC_OCTETS),
OCELOT_STAT(RX_PMAC_UNICAST),
OCELOT_STAT(RX_PMAC_MULTICAST),
@@ -341,11 +343,12 @@ static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
*/
static void ocelot_port_transfer_stats(struct ocelot *ocelot, int port)
{
- unsigned int idx = port * OCELOT_NUM_STATS;
struct ocelot_stats_region *region;
int j;
list_for_each_entry(region, &ocelot->stats_regions, node) {
+ unsigned int idx = port * OCELOT_NUM_STATS + region->first_stat;
+
for (j = 0; j < region->count; j++) {
u64 *stat = &ocelot->stats[idx + j];
u64 val = region->buf[j];
@@ -355,8 +358,6 @@ static void ocelot_port_transfer_stats(struct ocelot *ocelot, int port)
*stat = (*stat & ~(u64)U32_MAX) + val;
}
-
- idx += region->count;
}
}
@@ -899,7 +900,8 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
if (!layout[i].reg)
continue;
- if (region && layout[i].reg == last + 4) {
+ if (region && ocelot->map[SYS][layout[i].reg & REG_MASK] ==
+ ocelot->map[SYS][last & REG_MASK] + 4) {
region->count++;
} else {
region = devm_kzalloc(ocelot->dev, sizeof(*region),
@@ -914,6 +916,7 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
WARN_ON(last >= layout[i].reg);
region->base = layout[i].reg;
+ region->first_stat = i;
region->count = 1;
list_add_tail(&region->node, &ocelot->stats_regions);
}
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index d17d1b4f2585..825356ee3492 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -292,7 +292,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
*/
laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
- if (!laddr) {
+ if (dma_mapping_error(lp->device, laddr)) {
pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -509,7 +509,7 @@ static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
*new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
SONIC_RBSIZE, DMA_FROM_DEVICE);
- if (!*new_addr) {
+ if (dma_mapping_error(lp->device, *new_addr)) {
dev_kfree_skb(*new_skb);
*new_skb = NULL;
return false;
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
index 59fb0583cc08..0cc026b0aefd 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -324,14 +324,15 @@ netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
/* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
nfp_nfd3_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
- nfp_nfd3_tx_csum(dp, r_vec, txbuf, txd, skb);
+ if (ipsec)
+ nfp_nfd3_ipsec_tx(txd, skb);
+ else
+ nfp_nfd3_tx_csum(dp, r_vec, txbuf, txd, skb);
if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
txd->flags |= NFD3_DESC_TX_VLAN;
txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
}
- if (ipsec)
- nfp_nfd3_ipsec_tx(txd, skb);
/* Gather DMA */
if (nr_frags > 0) {
__le64 second_half;
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c b/drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c
index e90f8c975903..51087693072c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c
@@ -10,9 +10,30 @@
void nfp_nfd3_ipsec_tx(struct nfp_nfd3_tx_desc *txd, struct sk_buff *skb)
{
struct xfrm_state *x = xfrm_input_state(skb);
+ struct xfrm_offload *xo = xfrm_offload(skb);
+ struct iphdr *iph = ip_hdr(skb);
+ int l4_proto;
if (x->xso.dev && (x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)) {
- txd->flags |= NFD3_DESC_TX_CSUM | NFD3_DESC_TX_IP4_CSUM |
- NFD3_DESC_TX_TCP_CSUM | NFD3_DESC_TX_UDP_CSUM;
+ txd->flags |= NFD3_DESC_TX_CSUM;
+
+ if (iph->version == 4)
+ txd->flags |= NFD3_DESC_TX_IP4_CSUM;
+
+ if (x->props.mode == XFRM_MODE_TRANSPORT)
+ l4_proto = xo->proto;
+ else if (x->props.mode == XFRM_MODE_TUNNEL)
+ l4_proto = xo->inner_ipproto;
+ else
+ return;
+
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ txd->flags |= NFD3_DESC_TX_UDP_CSUM;
+ return;
+ case IPPROTO_TCP:
+ txd->flags |= NFD3_DESC_TX_TCP_CSUM;
+ return;
+ }
}
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index d60c0e991a91..33b6d74adb4b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -387,7 +387,8 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
if (!skb_is_gso(skb)) {
real_len = skb->len;
/* Metadata desc */
- metadata = nfp_nfdk_tx_csum(dp, r_vec, 1, skb, metadata);
+ if (!ipsec)
+ metadata = nfp_nfdk_tx_csum(dp, r_vec, 1, skb, metadata);
txd->raw = cpu_to_le64(metadata);
txd++;
} else {
@@ -395,7 +396,8 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
(txd + 1)->raw = nfp_nfdk_tx_tso(r_vec, txbuf, skb);
real_len = txbuf->real_len;
/* Metadata desc */
- metadata = nfp_nfdk_tx_csum(dp, r_vec, txbuf->pkt_cnt, skb, metadata);
+ if (!ipsec)
+ metadata = nfp_nfdk_tx_csum(dp, r_vec, txbuf->pkt_cnt, skb, metadata);
txd->raw = cpu_to_le64(metadata);
txd += 2;
txbuf++;
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c b/drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c
index 58d8f59eb885..cec199f4c852 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c
@@ -9,9 +9,13 @@
u64 nfp_nfdk_ipsec_tx(u64 flags, struct sk_buff *skb)
{
struct xfrm_state *x = xfrm_input_state(skb);
+ struct iphdr *iph = ip_hdr(skb);
- if (x->xso.dev && (x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM))
- flags |= NFDK_DESC_TX_L3_CSUM | NFDK_DESC_TX_L4_CSUM;
+ if (x->xso.dev && (x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)) {
+ if (iph->version == 4)
+ flags |= NFDK_DESC_TX_L3_CSUM;
+ flags |= NFDK_DESC_TX_L4_CSUM;
+ }
return flags;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 81b7ca0ad222..62f0bf91d1e1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -38,6 +38,7 @@
#include <net/tls.h>
#include <net/vxlan.h>
#include <net/xdp_sock_drv.h>
+#include <net/xfrm.h>
#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nsp.h"
@@ -1897,6 +1898,9 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
features &= ~NETIF_F_GSO_MASK;
}
+ if (xfrm_offload(skb))
+ return features;
+
/* VXLAN/GRE check */
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index d61cd32ec3b6..86a93cac2647 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -5083,6 +5083,11 @@ static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
num_vports = p_hwfn->qm_info.num_vports;
+ if (num_vports < 2) {
+ DP_NOTICE(p_hwfn, "Unexpected num_vports: %d\n", num_vports);
+ return -EINVAL;
+ }
+
/* Accounting for the vports which are configured for WFQ explicitly */
for (i = 0; i < num_vports; i++) {
u32 tmp_speed;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
index 6190adf965bc..f55eed092f25 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
@@ -422,7 +422,7 @@ qed_mfw_get_tlv_time_value(struct qed_mfw_tlv_time *p_time,
if (p_time->hour > 23)
p_time->hour = 0;
if (p_time->min > 59)
- p_time->hour = 0;
+ p_time->min = 0;
if (p_time->msec > 999)
p_time->msec = 0;
if (p_time->usec > 999)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 2bf18748581d..fa167b1aa019 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -4404,6 +4404,9 @@ qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
}
vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
+ if (!vf)
+ return -EINVAL;
+
vport_id = vf->vport_id;
return qed_configure_vport_wfq(cdev, vport_id, rate);
@@ -5152,7 +5155,7 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
/* Validate that the VF has a configured vport */
vf = qed_iov_get_vf_info(hwfn, i, true);
- if (!vf->vport_instance)
+ if (!vf || !vf->vport_instance)
continue;
memset(&params, 0, sizeof(params));
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 3115b2c12898..eaa50050aa0b 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -724,9 +724,15 @@ static int emac_remove(struct platform_device *pdev)
struct net_device *netdev = dev_get_drvdata(&pdev->dev);
struct emac_adapter *adpt = netdev_priv(netdev);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
unregister_netdev(netdev);
netif_napi_del(&adpt->rx_q.napi);
+ free_irq(adpt->irq.irq, &adpt->irq);
+ cancel_work_sync(&adpt->work_thread);
+
emac_clks_teardown(adpt);
put_device(&adpt->phydev->mdio.dev);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 0f54849a3823..894e2690c643 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1455,8 +1455,6 @@ static int ravb_phy_init(struct net_device *ndev)
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
}
- /* Indicate that the MAC is responsible for managing PHY PM */
- phydev->mac_managed_pm = true;
phy_attached_info(phydev);
return 0;
@@ -2379,6 +2377,8 @@ static int ravb_mdio_init(struct ravb_private *priv)
{
struct platform_device *pdev = priv->pdev;
struct device *dev = &pdev->dev;
+ struct phy_device *phydev;
+ struct device_node *pn;
int error;
/* Bitbang init */
@@ -2400,6 +2400,14 @@ static int ravb_mdio_init(struct ravb_private *priv)
if (error)
goto out_free_bus;
+ pn = of_parse_phandle(dev->of_node, "phy-handle", 0);
+ phydev = of_phy_find_device(pn);
+ if (phydev) {
+ phydev->mac_managed_pm = true;
+ put_device(&phydev->mdio.dev);
+ }
+ of_node_put(pn);
+
return 0;
out_free_bus:
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 853394e5bb8b..c4f93d24c6a4 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -702,13 +702,14 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
u16 pkt_len;
u32 get_ts;
+ if (*quota <= 0)
+ return true;
+
boguscnt = min_t(int, gq->ring_size, *quota);
limit = boguscnt;
desc = &gq->rx_ring[gq->cur];
while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
- if (--boguscnt < 0)
- break;
dma_rmb();
pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
skb = gq->skbs[gq->cur];
@@ -734,6 +735,9 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
gq->cur = rswitch_next_queue_index(gq, true, 1);
desc = &gq->rx_ring[gq->cur];
+
+ if (--boguscnt <= 0)
+ break;
}
num = rswitch_get_num_cur_queues(gq);
@@ -745,7 +749,7 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
goto err;
gq->dirty = rswitch_next_queue_index(gq, false, num);
- *quota -= limit - (++boguscnt);
+ *quota -= limit - boguscnt;
return boguscnt <= 0;
@@ -1437,7 +1441,10 @@ static int rswitch_open(struct net_device *ndev)
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
- iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
+ if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
+ iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
+
+ bitmap_set(rdev->priv->opened_ports, rdev->port, 1);
return 0;
};
@@ -1448,8 +1455,10 @@ static int rswitch_stop(struct net_device *ndev)
struct rswitch_gwca_ts_info *ts_info, *ts_info2;
netif_tx_stop_all_queues(ndev);
+ bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
- iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
+ if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
+ iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
if (ts_info->port != rdev->port)
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index 27d3d38c055f..b3e0411b408e 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -998,6 +998,7 @@ struct rswitch_private {
struct rcar_gen4_ptp_private *ptp_priv;
struct rswitch_device *rdev[RSWITCH_NUM_PORTS];
+ DECLARE_BITMAP(opened_ports, RSWITCH_NUM_PORTS);
struct rswitch_gwca gwca;
struct rswitch_etha etha[RSWITCH_NUM_PORTS];
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index ed17163d7811..d8ec729825be 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2029,8 +2029,6 @@ static int sh_eth_phy_init(struct net_device *ndev)
if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
phy_set_max_speed(phydev, SPEED_100);
- /* Indicate that the MAC is responsible for managing PHY PM */
- phydev->mac_managed_pm = true;
phy_attached_info(phydev);
return 0;
@@ -3097,6 +3095,8 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
struct bb_info *bitbang;
struct platform_device *pdev = mdp->pdev;
struct device *dev = &mdp->pdev->dev;
+ struct phy_device *phydev;
+ struct device_node *pn;
/* create bit control struct for PHY */
bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
@@ -3133,6 +3133,14 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
if (ret)
goto out_free_bus;
+ pn = of_parse_phandle(dev->of_node, "phy-handle", 0);
+ phydev = of_phy_find_device(pn);
+ if (phydev) {
+ phydev->mac_managed_pm = true;
+ put_device(&phydev->mdio.dev);
+ }
+ of_node_put(pn);
+
return 0;
out_free_bus:
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 6b5d96bced47..ec9c130276d8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -418,6 +418,7 @@ struct dma_features {
unsigned int frpbs;
unsigned int frpes;
unsigned int addr64;
+ unsigned int host_dma_width;
unsigned int rssen;
unsigned int vlhash;
unsigned int sphen;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index ac8580f501e2..2a2be65d65a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -213,8 +213,7 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
struct device_node *np = dev->of_node;
int err = 0;
- if (of_get_property(np, "snps,rmii_refclk_ext", NULL))
- dwmac->rmii_refclk_ext = true;
+ dwmac->rmii_refclk_ext = of_property_read_bool(np, "snps,rmii_refclk_ext");
dwmac->clk_tx = devm_clk_get(dev, "tx");
if (IS_ERR(dwmac->clk_tx)) {
@@ -289,7 +288,7 @@ static int imx_dwmac_probe(struct platform_device *pdev)
goto err_parse_dt;
}
- plat_dat->addr64 = dwmac->ops->addr_width;
+ plat_dat->host_dma_width = dwmac->ops->addr_width;
plat_dat->init = imx_dwmac_init;
plat_dat->exit = imx_dwmac_exit;
plat_dat->clks_config = imx_dwmac_clks_config;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 7deb1f817dac..13aa919633b4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -684,7 +684,7 @@ static int ehl_pse0_common_data(struct pci_dev *pdev,
intel_priv->is_pse = true;
plat->bus_id = 2;
- plat->addr64 = 32;
+ plat->host_dma_width = 32;
plat->clk_ptp_rate = 200000000;
@@ -725,7 +725,7 @@ static int ehl_pse1_common_data(struct pci_dev *pdev,
intel_priv->is_pse = true;
plat->bus_id = 3;
- plat->addr64 = 32;
+ plat->host_dma_width = 32;
plat->clk_ptp_rate = 200000000;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 2f7d8e4561d9..9ae31e3dc821 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -591,7 +591,7 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
plat->use_phy_wol = priv_plat->mac_wol ? 0 : 1;
plat->riwt_off = 1;
plat->maxmtu = ETH_DATA_LEN;
- plat->addr64 = priv_plat->variant->dma_bit_mask;
+ plat->host_dma_width = priv_plat->variant->dma_bit_mask;
plat->bsp_priv = priv_plat;
plat->init = mediatek_dwmac_init;
plat->clks_config = mediatek_dwmac_clks_config;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index e4902a7bb61e..17310ade88dd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1170,6 +1170,7 @@ static int stmmac_init_phy(struct net_device *dev)
phylink_ethtool_get_wol(priv->phylink, &wol);
device_set_wakeup_capable(priv->device, !!wol.supported);
+ device_set_wakeup_enable(priv->device, !!wol.wolopts);
}
return ret;
@@ -1430,7 +1431,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
- if (priv->dma_cap.addr64 <= 32)
+ if (priv->dma_cap.host_dma_width <= 32)
gfp |= GFP_DMA32;
if (!buf->page) {
@@ -4586,7 +4587,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
unsigned int entry = rx_q->dirty_rx;
gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
- if (priv->dma_cap.addr64 <= 32)
+ if (priv->dma_cap.host_dma_width <= 32)
gfp |= GFP_DMA32;
while (dirty-- > 0) {
@@ -6204,7 +6205,7 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
seq_printf(seq, "\tFlexible RX Parser: %s\n",
priv->dma_cap.frpsel ? "Y" : "N");
seq_printf(seq, "\tEnhanced Addressing: %d\n",
- priv->dma_cap.addr64);
+ priv->dma_cap.host_dma_width);
seq_printf(seq, "\tReceive Side Scaling: %s\n",
priv->dma_cap.rssen ? "Y" : "N");
seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
@@ -7177,20 +7178,22 @@ int stmmac_dvr_probe(struct device *device,
dev_info(priv->device, "SPH feature enabled\n");
}
- /* The current IP register MAC_HW_Feature1[ADDR64] only define
- * 32/40/64 bit width, but some SOC support others like i.MX8MP
- * support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64].
- * So overwrite dma_cap.addr64 according to HW real design.
+ /* Ideally our host DMA address width is the same as for the
+ * device. However, it may differ and then we have to use our
+ * host DMA width for allocation and the device DMA width for
+ * register handling.
*/
- if (priv->plat->addr64)
- priv->dma_cap.addr64 = priv->plat->addr64;
+ if (priv->plat->host_dma_width)
+ priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
+ else
+ priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
- if (priv->dma_cap.addr64) {
+ if (priv->dma_cap.host_dma_width) {
ret = dma_set_mask_and_coherent(device,
- DMA_BIT_MASK(priv->dma_cap.addr64));
+ DMA_BIT_MASK(priv->dma_cap.host_dma_width));
if (!ret) {
- dev_info(priv->device, "Using %d bits DMA width\n",
- priv->dma_cap.addr64);
+ dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
+ priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
/*
* If more than 32 bits can be addressed, make sure to
@@ -7205,7 +7208,7 @@ int stmmac_dvr_probe(struct device *device,
goto error_hw_init;
}
- priv->dma_cap.addr64 = 32;
+ priv->dma_cap.host_dma_width = 32;
}
}
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 8addee6d04bd..734a817d3c94 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -287,6 +287,9 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
hp = mdesc_grab();
+ if (!hp)
+ return -ENODEV;
+
rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
err = -ENODEV;
if (!rmac) {
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index e6144d963eaa..ab8b09a9ef61 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9271,7 +9271,7 @@ static int niu_get_of_props(struct niu *np)
if (model)
strcpy(np->vpd.model, model);
- if (of_find_property(dp, "hot-swappable-phy", NULL)) {
+ if (of_property_read_bool(dp, "hot-swappable-phy")) {
np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
NIU_FLAGS_HOTPLUG_PHY);
}
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index fe86fbd58586..e220620d0ffc 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -433,6 +433,9 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
hp = mdesc_grab();
+ if (!hp)
+ return -ENODEV;
+
vp = vnet_find_parent(hp, vdev->mp, vdev);
if (IS_ERR(vp)) {
pr_err("Cannot find port parent vnet\n");
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 16ee9c29cb35..8caf85acbb6a 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -636,6 +636,10 @@ static void am65_cpts_perout_enable_hw(struct am65_cpts *cpts,
val = lower_32_bits(cycles);
am65_cpts_write32(cpts, val, genf[req->index].length);
+ am65_cpts_write32(cpts, 0, genf[req->index].control);
+ am65_cpts_write32(cpts, 0, genf[req->index].ppm_hi);
+ am65_cpts_write32(cpts, 0, genf[req->index].ppm_low);
+
cpts->genf_enable |= BIT(req->index);
} else {
am65_cpts_write32(cpts, 0, genf[req->index].length);
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index e8f38e3f7706..25e707d7b87c 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -226,8 +226,7 @@ static int cpsw_phy_sel_probe(struct platform_device *pdev)
if (IS_ERR(priv->gmii_sel))
return PTR_ERR(priv->gmii_sel);
- if (of_find_property(pdev->dev.of_node, "rmii-clock-ext", NULL))
- priv->rmii_clock_external = true;
+ priv->rmii_clock_external = of_property_read_bool(pdev->dev.of_node, "rmii-clock-ext");
dev_set_drvdata(&pdev->dev, priv);
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 751fb0bc65c5..2adf82a32bf6 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -3583,13 +3583,11 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
/* init the hw stats lock */
spin_lock_init(&gbe_dev->hw_stats_lock);
- if (of_find_property(node, "enable-ale", NULL)) {
- gbe_dev->enable_ale = true;
+ gbe_dev->enable_ale = of_property_read_bool(node, "enable-ale");
+ if (gbe_dev->enable_ale)
dev_info(dev, "ALE enabled\n");
- } else {
- gbe_dev->enable_ale = false;
+ else
dev_dbg(dev, "ALE bypass enabled*\n");
- }
ret = of_property_read_u32(node, "tx-queue",
&gbe_dev->tx_queue_id);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index cf8de8a7a8a1..9d535ae59626 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -317,15 +317,17 @@ static int gelic_card_init_chain(struct gelic_card *card,
/* set up the hardware pointers in each descriptor */
for (i = 0; i < no; i++, descr++) {
+ dma_addr_t cpu_addr;
+
gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
- descr->bus_addr =
- dma_map_single(ctodev(card), descr,
- GELIC_DESCR_SIZE,
- DMA_BIDIRECTIONAL);
- if (!descr->bus_addr)
+ cpu_addr = dma_map_single(ctodev(card), descr,
+ GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(ctodev(card), cpu_addr))
goto iommu_error;
+ descr->bus_addr = cpu_to_be32(cpu_addr);
descr->next = descr + 1;
descr->prev = descr - 1;
}
@@ -365,26 +367,28 @@ iommu_error:
*
* allocates a new rx skb, iommu-maps it and attaches it to the descriptor.
* Activate the descriptor state-wise
+ *
+ * Gelic RX sk_buffs must be aligned to GELIC_NET_RXBUF_ALIGN and the length
+ * must be a multiple of GELIC_NET_RXBUF_ALIGN.
*/
static int gelic_descr_prepare_rx(struct gelic_card *card,
struct gelic_descr *descr)
{
+ static const unsigned int rx_skb_size =
+ ALIGN(GELIC_NET_MAX_FRAME, GELIC_NET_RXBUF_ALIGN) +
+ GELIC_NET_RXBUF_ALIGN - 1;
+ dma_addr_t cpu_addr;
int offset;
- unsigned int bufsize;
if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
dev_info(ctodev(card), "%s: ERROR status\n", __func__);
- /* we need to round up the buffer size to a multiple of 128 */
- bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN);
- /* and we need to have it 128 byte aligned, therefore we allocate a
- * bit more */
- descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1);
+ descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
if (!descr->skb) {
descr->buf_addr = 0; /* tell DMAC don't touch memory */
return -ENOMEM;
}
- descr->buf_size = cpu_to_be32(bufsize);
+ descr->buf_size = cpu_to_be32(rx_skb_size);
descr->dmac_cmd_status = 0;
descr->result_size = 0;
descr->valid_size = 0;
@@ -395,11 +399,10 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
if (offset)
skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset);
/* io-mmu-map the skb */
- descr->buf_addr = cpu_to_be32(dma_map_single(ctodev(card),
- descr->skb->data,
- GELIC_NET_MAX_MTU,
- DMA_FROM_DEVICE));
- if (!descr->buf_addr) {
+ cpu_addr = dma_map_single(ctodev(card), descr->skb->data,
+ GELIC_NET_MAX_FRAME, DMA_FROM_DEVICE);
+ descr->buf_addr = cpu_to_be32(cpu_addr);
+ if (dma_mapping_error(ctodev(card), cpu_addr)) {
dev_kfree_skb_any(descr->skb);
descr->skb = NULL;
dev_info(ctodev(card),
@@ -779,7 +782,7 @@ static int gelic_descr_prepare_tx(struct gelic_card *card,
buf = dma_map_single(ctodev(card), skb->data, skb->len, DMA_TO_DEVICE);
- if (!buf) {
+ if (dma_mapping_error(ctodev(card), buf)) {
dev_err(ctodev(card),
"dma map 2 failed (%p, %i). Dropping packet\n",
skb->data, skb->len);
@@ -915,7 +918,7 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr,
data_error = be32_to_cpu(descr->data_error);
/* unmap skb buffer */
dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr),
- GELIC_NET_MAX_MTU,
+ GELIC_NET_MAX_FRAME,
DMA_FROM_DEVICE);
skb_put(skb, be32_to_cpu(descr->valid_size)?
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index 68f324ed4eaf..0d98defb011e 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -19,8 +19,9 @@
#define GELIC_NET_RX_DESCRIPTORS 128 /* num of descriptors */
#define GELIC_NET_TX_DESCRIPTORS 128 /* num of descriptors */
-#define GELIC_NET_MAX_MTU VLAN_ETH_FRAME_LEN
-#define GELIC_NET_MIN_MTU VLAN_ETH_ZLEN
+#define GELIC_NET_MAX_FRAME 2312
+#define GELIC_NET_MAX_MTU 2294
+#define GELIC_NET_MIN_MTU 64
#define GELIC_NET_RXBUF_ALIGN 128
#define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */
#define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index a502812ac418..86f7843b4591 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2709,8 +2709,7 @@ static int velocity_get_platform_info(struct velocity_info *vptr)
struct resource res;
int ret;
- if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL))
- vptr->no_eeprom = 1;
+ vptr->no_eeprom = of_property_read_bool(vptr->dev->of_node, "no-eeprom");
ret = of_address_to_resource(vptr->dev->of_node, 0, &res);
if (ret) {
diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h
index ffdac6fac054..f64ed39b93d8 100644
--- a/drivers/net/ethernet/via/via-velocity.h
+++ b/drivers/net/ethernet/via/via-velocity.h
@@ -1383,7 +1383,7 @@ struct velocity_info {
struct device *dev;
struct pci_dev *pdev;
struct net_device *netdev;
- int no_eeprom;
+ bool no_eeprom;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u8 ip_addr[4];
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 1066420d6a83..e0ac1bcd9925 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1455,12 +1455,11 @@ static int temac_probe(struct platform_device *pdev)
* endianness mode. Default for OF devices is big-endian.
*/
little_endian = false;
- if (temac_np) {
- if (of_get_property(temac_np, "little-endian", NULL))
- little_endian = true;
- } else if (pdata) {
+ if (temac_np)
+ little_endian = of_property_read_bool(temac_np, "little-endian");
+ else if (pdata)
little_endian = pdata->reg_little_endian;
- }
+
if (little_endian) {
lp->temac_ior = _temac_ior_le;
lp->temac_iow = _temac_iow_le;
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 894e92ef415b..9f505cf02d96 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -503,6 +503,11 @@ static void
xirc2ps_detach(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ struct local_info *local = netdev_priv(dev);
+
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+ cancel_work_sync(&local->tx_timeout_task);
dev_dbg(&link->dev, "detach\n");
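[review note] The xirc2ps_detach() hunk stops traffic and flushes the tx-timeout work before teardown, so the work item cannot run against freed device state. A minimal sketch of that ordering, with hypothetical names:

    #include <linux/netdevice.h>
    #include <linux/workqueue.h>

    static void example_detach(struct net_device *dev,
                               struct work_struct *tx_timeout_task)
    {
            netif_carrier_off(dev);            /* stop reporting link up    */
            netif_tx_disable(dev);             /* stop the TX queues        */
            cancel_work_sync(tx_timeout_task); /* wait for pending work     */
            /* ... only now unregister/free the device state ... */
    }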
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index e1a569b99e4a..0b0c6c0764fe 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -1913,6 +1913,8 @@ static int ca8210_skb_tx(
* packet
*/
mac_len = ieee802154_hdr_peek_addrs(skb, &header);
+ if (mac_len < 0)
+ return mac_len;
secspec.security_level = header.sec.level;
secspec.key_id_mode = header.sec.key_id_mode;
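[review note] ieee802154_hdr_peek_addrs() returns the parsed header length or a negative errno; the ca8210 hunk propagates the error instead of treating a negative value as a length. A minimal sketch of the check (the surrounding caller context here is hypothetical):

    #include <linux/skbuff.h>
    #include <net/ieee802154_netdev.h>

    static int example_tx_prepare(struct sk_buff *skb)
    {
            struct ieee802154_hdr header;
            int mac_len;

            mac_len = ieee802154_hdr_peek_addrs(skb, &header);
            if (mac_len < 0)        /* malformed header: propagate errno */
                    return mac_len;

            /* header.sec and mac_len are safe to use from here on */
            return 0;
    }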
diff --git a/drivers/net/ipa/gsi_reg.c b/drivers/net/ipa/gsi_reg.c
index 1412b67304c8..1651fbad4bd5 100644
--- a/drivers/net/ipa/gsi_reg.c
+++ b/drivers/net/ipa/gsi_reg.c
@@ -15,6 +15,14 @@ static bool gsi_reg_id_valid(struct gsi *gsi, enum gsi_reg_id reg_id)
switch (reg_id) {
case INTER_EE_SRC_CH_IRQ_MSK:
case INTER_EE_SRC_EV_CH_IRQ_MSK:
+ return gsi->version >= IPA_VERSION_3_5;
+
+ case HW_PARAM_2:
+ return gsi->version >= IPA_VERSION_3_5_1;
+
+ case HW_PARAM_4:
+ return gsi->version >= IPA_VERSION_5_0;
+
case CH_C_CNTXT_0:
case CH_C_CNTXT_1:
case CH_C_CNTXT_2:
@@ -43,7 +51,6 @@ static bool gsi_reg_id_valid(struct gsi *gsi, enum gsi_reg_id reg_id)
case CH_CMD:
case EV_CH_CMD:
case GENERIC_CMD:
- case HW_PARAM_2:
case CNTXT_TYPE_IRQ:
case CNTXT_TYPE_IRQ_MSK:
case CNTXT_SRC_CH_IRQ:
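[review note] The gsi_reg_id_valid() hunk gates HW_PARAM_2, HW_PARAM_4 and the inter-EE IRQ masks on the GSI/IPA version, so lookups of registers that do not exist on a given generation fail validation rather than returning stale descriptors. A generic sketch of the pattern (enum values and version numbers are hypothetical):

    enum example_reg { REG_A, REG_B, REG_C };

    static bool example_reg_valid(unsigned int version, enum example_reg id)
    {
            switch (id) {
            case REG_A:
                    return version >= 35;   /* only on v3.5 and later   */
            case REG_B:
                    return version >= 50;   /* only on v5.0 and later   */
            case REG_C:
                    return true;            /* present on every version */
            }
            return false;
    }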
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index f62f0a5c653d..48fde65fa2e8 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -10,6 +10,10 @@
#include <linux/bits.h>
+struct platform_device;
+
+struct gsi;
+
/**
* DOC: GSI Registers
*
diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c
index 735fa6591609..3f475428dddd 100644
--- a/drivers/net/ipa/ipa_reg.c
+++ b/drivers/net/ipa/ipa_reg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2023 Linaro Ltd.
*/
#include <linux/io.h>
@@ -15,6 +15,17 @@ static bool ipa_reg_id_valid(struct ipa *ipa, enum ipa_reg_id reg_id)
enum ipa_version version = ipa->version;
switch (reg_id) {
+ case FILT_ROUT_HASH_EN:
+ return version == IPA_VERSION_4_2;
+
+ case FILT_ROUT_HASH_FLUSH:
+ return version < IPA_VERSION_5_0 && version != IPA_VERSION_4_2;
+
+ case FILT_ROUT_CACHE_FLUSH:
+ case ENDP_FILTER_CACHE_CFG:
+ case ENDP_ROUTER_CACHE_CFG:
+ return version >= IPA_VERSION_5_0;
+
case IPA_BCR:
case COUNTER_CFG:
return version < IPA_VERSION_4_5;
@@ -32,14 +43,17 @@ static bool ipa_reg_id_valid(struct ipa *ipa, enum ipa_reg_id reg_id)
case SRC_RSRC_GRP_45_RSRC_TYPE:
case DST_RSRC_GRP_45_RSRC_TYPE:
return version <= IPA_VERSION_3_1 ||
- version == IPA_VERSION_4_5;
+ version == IPA_VERSION_4_5 ||
+ version == IPA_VERSION_5_0;
case SRC_RSRC_GRP_67_RSRC_TYPE:
case DST_RSRC_GRP_67_RSRC_TYPE:
- return version <= IPA_VERSION_3_1;
+ return version <= IPA_VERSION_3_1 ||
+ version == IPA_VERSION_5_0;
case ENDP_FILTER_ROUTER_HSH_CFG:
- return version != IPA_VERSION_4_2;
+ return version < IPA_VERSION_5_0 &&
+ version != IPA_VERSION_4_2;
case IRQ_SUSPEND_EN:
case IRQ_SUSPEND_CLR:
@@ -51,10 +65,6 @@ static bool ipa_reg_id_valid(struct ipa *ipa, enum ipa_reg_id reg_id)
case SHARED_MEM_SIZE:
case QSB_MAX_WRITES:
case QSB_MAX_READS:
- case FILT_ROUT_HASH_EN:
- case FILT_ROUT_CACHE_CFG:
- case FILT_ROUT_HASH_FLUSH:
- case FILT_ROUT_CACHE_FLUSH:
case STATE_AGGR_ACTIVE:
case LOCAL_PKT_PROC_CNTXT:
case AGGR_FORCE_CLOSE:
@@ -76,8 +86,6 @@ static bool ipa_reg_id_valid(struct ipa *ipa, enum ipa_reg_id reg_id)
case ENDP_INIT_RSRC_GRP:
case ENDP_INIT_SEQ:
case ENDP_STATUS:
- case ENDP_FILTER_CACHE_CFG:
- case ENDP_ROUTER_CACHE_CFG:
case IPA_IRQ_STTS:
case IPA_IRQ_EN:
case IPA_IRQ_CLR:
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index 28aa1351dd48..7dd65d39333d 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -60,9 +60,8 @@ enum ipa_reg_id {
SHARED_MEM_SIZE,
QSB_MAX_WRITES,
QSB_MAX_READS,
- FILT_ROUT_HASH_EN, /* Not IPA v5.0+ */
- FILT_ROUT_CACHE_CFG, /* IPA v5.0+ */
- FILT_ROUT_HASH_FLUSH, /* Not IPA v5.0+ */
+ FILT_ROUT_HASH_EN, /* IPA v4.2 */
+ FILT_ROUT_HASH_FLUSH, /* Not IPA v4.2 nor IPA v5.0+ */
FILT_ROUT_CACHE_FLUSH, /* IPA v5.0+ */
STATE_AGGR_ACTIVE,
IPA_BCR, /* Not IPA v4.5+ */
@@ -77,12 +76,12 @@ enum ipa_reg_id {
TIMERS_PULSE_GRAN_CFG, /* IPA v4.5+ */
SRC_RSRC_GRP_01_RSRC_TYPE,
SRC_RSRC_GRP_23_RSRC_TYPE,
- SRC_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+, IPA v4.5 */
- SRC_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+ */
+ SRC_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+; IPA v4.5, IPA v5.0 */
+ SRC_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+; IPA v5.0 */
DST_RSRC_GRP_01_RSRC_TYPE,
DST_RSRC_GRP_23_RSRC_TYPE,
- DST_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+, IPA v4.5 */
- DST_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+ */
+ DST_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+; IPA v4.5, IPA v5.0 */
+ DST_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+; IPA v5.0 */
ENDP_INIT_CTRL, /* Not IPA v4.2+ for TX, not IPA v4.0+ for RX */
ENDP_INIT_CFG,
ENDP_INIT_NAT, /* TX only */
@@ -206,14 +205,6 @@ enum ipa_reg_qsb_max_reads_field_id {
GEN_QMB_1_MAX_READS_BEATS, /* IPA v4.0+ */
};
-/* FILT_ROUT_CACHE_CFG register */
-enum ipa_reg_filt_rout_cache_cfg_field_id {
- ROUTER_CACHE_EN,
- FILTER_CACHE_EN,
- LOW_PRI_HASH_HIT_DISABLE,
- LRU_EVICTION_THRESHOLD,
-};
-
/* FILT_ROUT_HASH_EN and FILT_ROUT_HASH_FLUSH registers */
enum ipa_reg_filt_rout_hash_field_id {
IPV6_ROUTER_HASH,
diff --git a/drivers/net/ipa/reg.h b/drivers/net/ipa/reg.h
index 57b457f39b6e..2ee07eebca67 100644
--- a/drivers/net/ipa/reg.h
+++ b/drivers/net/ipa/reg.h
@@ -6,7 +6,8 @@
#define _REG_H_
#include <linux/types.h>
-#include <linux/bits.h>
+#include <linux/log2.h>
+#include <linux/bug.h>
/**
* struct reg - A register descriptor
diff --git a/drivers/net/ipa/reg/gsi_reg-v4.5.c b/drivers/net/ipa/reg/gsi_reg-v4.5.c
index 648b51b88d4e..2900e5c3ff88 100644
--- a/drivers/net/ipa/reg/gsi_reg-v4.5.c
+++ b/drivers/net/ipa/reg/gsi_reg-v4.5.c
@@ -137,17 +137,17 @@ REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
0x0001004c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
- 0x0001e000 + 0x4000 * GSI_EE_AP, 0x08);
+ 0x00011000 + 0x4000 * GSI_EE_AP, 0x08);
REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
- 0x0001e100 + 0x4000 * GSI_EE_AP, 0x08);
+ 0x00011100 + 0x4000 * GSI_EE_AP, 0x08);
static const u32 reg_gsi_status_fmask[] = {
[ENABLED] = BIT(0),
/* Bits 1-31 reserved */
};
-REG_FIELDS(GSI_STATUS, gsi_status, 0x0001f000 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(GSI_STATUS, gsi_status, 0x00012000 + 0x4000 * GSI_EE_AP);
static const u32 reg_ch_cmd_fmask[] = {
[CH_CHID] = GENMASK(7, 0),
@@ -155,7 +155,7 @@ static const u32 reg_ch_cmd_fmask[] = {
[CH_OPCODE] = GENMASK(31, 24),
};
-REG_FIELDS(CH_CMD, ch_cmd, 0x0001f008 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(CH_CMD, ch_cmd, 0x00012008 + 0x4000 * GSI_EE_AP);
static const u32 reg_ev_ch_cmd_fmask[] = {
[EV_CHID] = GENMASK(7, 0),
@@ -163,7 +163,7 @@ static const u32 reg_ev_ch_cmd_fmask[] = {
[EV_OPCODE] = GENMASK(31, 24),
};
-REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x0001f010 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x00012010 + 0x4000 * GSI_EE_AP);
static const u32 reg_generic_cmd_fmask[] = {
[GENERIC_OPCODE] = GENMASK(4, 0),
@@ -172,7 +172,7 @@ static const u32 reg_generic_cmd_fmask[] = {
/* Bits 14-31 reserved */
};
-REG_FIELDS(GENERIC_CMD, generic_cmd, 0x0001f018 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(GENERIC_CMD, generic_cmd, 0x00012018 + 0x4000 * GSI_EE_AP);
static const u32 reg_hw_param_2_fmask[] = {
[IRAM_SIZE] = GENMASK(2, 0),
@@ -188,58 +188,58 @@ static const u32 reg_hw_param_2_fmask[] = {
[GSI_USE_INTER_EE] = BIT(31),
};
-REG_FIELDS(HW_PARAM_2, hw_param_2, 0x0001f040 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(HW_PARAM_2, hw_param_2, 0x00012040 + 0x4000 * GSI_EE_AP);
-REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x0001f080 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x00012080 + 0x4000 * GSI_EE_AP);
-REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x0001f088 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x00012088 + 0x4000 * GSI_EE_AP);
-REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x0001f090 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x00012090 + 0x4000 * GSI_EE_AP);
-REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0001f094 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x00012094 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
- 0x0001f098 + 0x4000 * GSI_EE_AP);
+ 0x00012098 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
- 0x0001f09c + 0x4000 * GSI_EE_AP);
+ 0x0001209c + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
- 0x0001f0a0 + 0x4000 * GSI_EE_AP);
+ 0x000120a0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
- 0x0001f0a4 + 0x4000 * GSI_EE_AP);
+ 0x000120a4 + 0x4000 * GSI_EE_AP);
-REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x0001f0b0 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x000120b0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
- 0x0001f0b8 + 0x4000 * GSI_EE_AP);
+ 0x000120b8 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
- 0x0001f0c0 + 0x4000 * GSI_EE_AP);
+ 0x000120c0 + 0x4000 * GSI_EE_AP);
-REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x0001f100 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x00012100 + 0x4000 * GSI_EE_AP);
-REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x0001f108 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x00012108 + 0x4000 * GSI_EE_AP);
-REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x0001f110 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x00012110 + 0x4000 * GSI_EE_AP);
-REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0001f118 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x00012118 + 0x4000 * GSI_EE_AP);
-REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x0001f120 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x00012120 + 0x4000 * GSI_EE_AP);
-REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x0001f128 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x00012128 + 0x4000 * GSI_EE_AP);
static const u32 reg_cntxt_intset_fmask[] = {
[INTYPE] = BIT(0)
/* Bits 1-31 reserved */
};
-REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x0001f180 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x00012180 + 0x4000 * GSI_EE_AP);
-REG_FIELDS(ERROR_LOG, error_log, 0x0001f200 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(ERROR_LOG, error_log, 0x00012200 + 0x4000 * GSI_EE_AP);
-REG(ERROR_LOG_CLR, error_log_clr, 0x0001f210 + 0x4000 * GSI_EE_AP);
+REG(ERROR_LOG_CLR, error_log_clr, 0x00012210 + 0x4000 * GSI_EE_AP);
static const u32 reg_cntxt_scratch_0_fmask[] = {
[INTER_EE_RESULT] = GENMASK(2, 0),
@@ -248,7 +248,7 @@ static const u32 reg_cntxt_scratch_0_fmask[] = {
/* Bits 8-31 reserved */
};
-REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x0001f400 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x00012400 + 0x4000 * GSI_EE_AP);
static const struct reg *reg_array[] = {
[INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
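[review note] The v4.5 GSI register table above moves every EE-scoped register from the 0x0001fxxx range down to 0x00012xxx (and the doorbells to 0x00011xxx) while keeping the 0x4000-per-EE stride, i.e. only the base offsets change. A sketch of how such an offset composes, with hypothetical names and independent of the REG_* macros:

    #include <linux/types.h>

    #define EXAMPLE_EE_STRIDE 0x4000        /* per execution environment */

    static u32 example_reg_offset(u32 base, unsigned int ee)
    {
            /* e.g. base 0x00012008 (CH_CMD) for the AP EE */
            return base + EXAMPLE_EE_STRIDE * ee;
    }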
diff --git a/drivers/net/ipa/reg/gsi_reg-v4.9.c b/drivers/net/ipa/reg/gsi_reg-v4.9.c
index 4bf45d264d6b..8b5d95425a76 100644
--- a/drivers/net/ipa/reg/gsi_reg-v4.9.c
+++ b/drivers/net/ipa/reg/gsi_reg-v4.9.c
@@ -27,7 +27,7 @@ static const u32 reg_ch_c_cntxt_0_fmask[] = {
};
REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
- 0x0001c000 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x0000f000 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ch_c_cntxt_1_fmask[] = {
[CH_R_LENGTH] = GENMASK(19, 0),
@@ -35,11 +35,11 @@ static const u32 reg_ch_c_cntxt_1_fmask[] = {
};
REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
- 0x0001c004 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x0000f004 + 0x4000 * GSI_EE_AP, 0x80);
-REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0001c008 + 0x4000 * GSI_EE_AP, 0x80);
+REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0000f008 + 0x4000 * GSI_EE_AP, 0x80);
-REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001c00c + 0x4000 * GSI_EE_AP, 0x80);
+REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0000f00c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ch_c_qos_fmask[] = {
[WRR_WEIGHT] = GENMASK(3, 0),
@@ -53,7 +53,7 @@ static const u32 reg_ch_c_qos_fmask[] = {
/* Bits 25-31 reserved */
};
-REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0001c05c + 0x4000 * GSI_EE_AP, 0x80);
+REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0000f05c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_error_log_fmask[] = {
[ERR_ARG3] = GENMASK(3, 0),
@@ -67,16 +67,16 @@ static const u32 reg_error_log_fmask[] = {
};
REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
- 0x0001c060 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x0000f060 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
- 0x0001c064 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x0000f064 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
- 0x0001c068 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x0000f068 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
- 0x0001c06c + 0x4000 * GSI_EE_AP, 0x80);
+ 0x0000f06c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
[EV_CHTYPE] = GENMASK(3, 0),
@@ -89,23 +89,23 @@ static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
- 0x0001d000 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x00010000 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
[R_LENGTH] = GENMASK(15, 0),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
- 0x0001d004 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x00010004 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
- 0x0001d008 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x00010008 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
- 0x0001d00c + 0x4000 * GSI_EE_AP, 0x80);
+ 0x0001000c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
- 0x0001d010 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x00010010 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
[EV_MODT] = GENMASK(15, 0),
@@ -114,28 +114,28 @@ static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
- 0x0001d020 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x00010020 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
- 0x0001d024 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x00010024 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
- 0x0001d028 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x00010028 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
- 0x0001d02c + 0x4000 * GSI_EE_AP, 0x80);
+ 0x0001002c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
- 0x0001d030 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x00010030 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
- 0x0001d034 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x00010034 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
- 0x0001d048 + 0x4000 * GSI_EE_AP, 0x80);
+ 0x00010048 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
- 0x0001d04c + 0x4000 * GSI_EE_AP, 0x80);
+ 0x0001004c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
0x00011000 + 0x4000 * GSI_EE_AP, 0x08);
diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c
index 943d26cbf39f..71712ea25403 100644
--- a/drivers/net/ipvlan/ipvlan_l3s.c
+++ b/drivers/net/ipvlan/ipvlan_l3s.c
@@ -101,6 +101,7 @@ static unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
goto out;
skb->dev = addr->master->dev;
+ skb->skb_iif = skb->dev->ifindex;
len = skb->len + ETH_HLEN;
ipvlan_count_rx(addr->master, len, true, false);
out:
diff --git a/drivers/net/mdio/acpi_mdio.c b/drivers/net/mdio/acpi_mdio.c
index d77c987fda9c..4630dde01974 100644
--- a/drivers/net/mdio/acpi_mdio.c
+++ b/drivers/net/mdio/acpi_mdio.c
@@ -18,16 +18,18 @@ MODULE_AUTHOR("Calvin Johnson <calvin.johnson@oss.nxp.com>");
MODULE_LICENSE("GPL");
/**
- * acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL.
+ * __acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL.
* @mdio: pointer to mii_bus structure
* @fwnode: pointer to fwnode of MDIO bus. This fwnode is expected to represent
+ * @owner: module owning this @mdio object.
* an ACPI device object corresponding to the MDIO bus and its children are
* expected to correspond to the PHY devices on that bus.
*
* This function registers the mii_bus structure and registers a phy_device
* for each child node of @fwnode.
*/
-int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
+int __acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode,
+ struct module *owner)
{
struct fwnode_handle *child;
u32 addr;
@@ -35,7 +37,7 @@ int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
/* Mask out all PHYs from auto probing. */
mdio->phy_mask = GENMASK(31, 0);
- ret = mdiobus_register(mdio);
+ ret = __mdiobus_register(mdio, owner);
if (ret)
return ret;
@@ -55,4 +57,4 @@ int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
}
return 0;
}
-EXPORT_SYMBOL(acpi_mdiobus_register);
+EXPORT_SYMBOL(__acpi_mdiobus_register);
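[review note] Like the of_mdio and mdio_devres hunks below, acpi_mdiobus_register() becomes __acpi_mdiobus_register() taking a struct module *owner, so the MDIO core can pin the module that actually created the bus. Presumably the header change (not shown in this section) wraps the old name around it; a hedged sketch of that common wrapper pattern, with hypothetical names:

    #include <linux/module.h>

    /* hypothetical wrapper: callers keep the old spelling while the
     * module owner is captured automatically at each call site
     */
    #define example_mdiobus_register(mdio, fwnode) \
            __example_mdiobus_register(mdio, fwnode, THIS_MODULE)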
diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c
index 3847ee92c109..6067d96b2b7b 100644
--- a/drivers/net/mdio/mdio-thunder.c
+++ b/drivers/net/mdio/mdio-thunder.c
@@ -106,6 +106,7 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
if (i >= ARRAY_SIZE(nexus->buses))
break;
}
+ fwnode_handle_put(fwn);
return 0;
err_release_regions:
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 510822d6d0d9..1e46e39f5f46 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -139,21 +139,23 @@ bool of_mdiobus_child_is_phy(struct device_node *child)
EXPORT_SYMBOL(of_mdiobus_child_is_phy);
/**
- * of_mdiobus_register - Register mii_bus and create PHYs from the device tree
+ * __of_mdiobus_register - Register mii_bus and create PHYs from the device tree
* @mdio: pointer to mii_bus structure
* @np: pointer to device_node of MDIO bus.
+ * @owner: module owning the @mdio object.
*
* This function registers the mii_bus structure and registers a phy_device
* for each child node of @np.
*/
-int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
+int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
+ struct module *owner)
{
struct device_node *child;
bool scanphys = false;
int addr, rc;
if (!np)
- return mdiobus_register(mdio);
+ return __mdiobus_register(mdio, owner);
/* Do not continue if the node is disabled */
if (!of_device_is_available(np))
@@ -172,7 +174,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
of_property_read_u32(np, "reset-post-delay-us", &mdio->reset_post_delay_us);
/* Register the MDIO bus */
- rc = mdiobus_register(mdio);
+ rc = __mdiobus_register(mdio, owner);
if (rc)
return rc;
@@ -236,7 +238,7 @@ unregister:
mdiobus_unregister(mdio);
return rc;
}
-EXPORT_SYMBOL(of_mdiobus_register);
+EXPORT_SYMBOL(__of_mdiobus_register);
/**
* of_mdio_find_device - Given a device tree node, find the mdio_device
diff --git a/drivers/net/phy/mdio_devres.c b/drivers/net/phy/mdio_devres.c
index b560e99695df..69b829e6ab35 100644
--- a/drivers/net/phy/mdio_devres.c
+++ b/drivers/net/phy/mdio_devres.c
@@ -98,13 +98,14 @@ EXPORT_SYMBOL(__devm_mdiobus_register);
#if IS_ENABLED(CONFIG_OF_MDIO)
/**
- * devm_of_mdiobus_register - Resource managed variant of of_mdiobus_register()
+ * __devm_of_mdiobus_register - Resource managed variant of of_mdiobus_register()
* @dev: Device to register mii_bus for
* @mdio: MII bus structure to register
* @np: Device node to parse
+ * @owner: Owning module
*/
-int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
- struct device_node *np)
+int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
+ struct device_node *np, struct module *owner)
{
struct mdiobus_devres *dr;
int ret;
@@ -117,7 +118,7 @@ int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
if (!dr)
return -ENOMEM;
- ret = of_mdiobus_register(mdio, np);
+ ret = __of_mdiobus_register(mdio, np, owner);
if (ret) {
devres_free(dr);
return ret;
@@ -127,7 +128,7 @@ int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
devres_add(dev, dr);
return 0;
}
-EXPORT_SYMBOL(devm_of_mdiobus_register);
+EXPORT_SYMBOL(__devm_of_mdiobus_register);
#endif /* CONFIG_OF_MDIO */
MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index ccecee2524ce..0b88635f4fbc 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -342,6 +342,37 @@ static int lan88xx_config_aneg(struct phy_device *phydev)
return genphy_config_aneg(phydev);
}
+static void lan88xx_link_change_notify(struct phy_device *phydev)
+{
+ int temp;
+
+ /* At forced 100 F/H mode, chip may fail to set mode correctly
+ * when cable is switched between long(~50+m) and short one.
+ * As workaround, set to 10 before setting to 100
+ * at forced 100 F/H mode.
+ */
+ if (!phydev->autoneg && phydev->speed == 100) {
+ /* disable phy interrupt */
+ temp = phy_read(phydev, LAN88XX_INT_MASK);
+ temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
+ phy_write(phydev, LAN88XX_INT_MASK, temp);
+
+ temp = phy_read(phydev, MII_BMCR);
+ temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
+ phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
+ temp |= BMCR_SPEED100;
+ phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
+
+ /* clear pending interrupt generated while workaround */
+ temp = phy_read(phydev, LAN88XX_INT_STS);
+
+ /* enable phy interrupt back */
+ temp = phy_read(phydev, LAN88XX_INT_MASK);
+ temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
+ phy_write(phydev, LAN88XX_INT_MASK, temp);
+ }
+}
+
static struct phy_driver microchip_phy_driver[] = {
{
.phy_id = 0x0007c132,
@@ -359,6 +390,7 @@ static struct phy_driver microchip_phy_driver[] = {
.config_init = lan88xx_config_init,
.config_aneg = lan88xx_config_aneg,
+ .link_change_notify = lan88xx_link_change_notify,
.config_intr = lan88xx_phy_config_intr,
.handle_interrupt = lan88xx_handle_interrupt,
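[review note] The lan88xx hunk moves the forced-100M cable-length workaround into the PHY driver's link_change_notify hook; the open-coded copy is deleted from lan78xx.c further down, so phylib now runs it on state changes. A minimal sketch of wiring such a hook into a phy_driver (IDs and names hypothetical):

    #include <linux/phy.h>

    static void example_link_change_notify(struct phy_device *phydev)
    {
            /* called by phylib whenever phydev->state changes */
    }

    static struct phy_driver example_phy_driver = {
            .phy_id             = 0x00000000,       /* hypothetical */
            .name               = "example PHY",
            .link_change_notify = example_link_change_notify,
    };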
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index 8a13b1ad9a33..62bf99e45af1 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -280,12 +280,9 @@ static int vsc85xx_wol_set(struct phy_device *phydev,
u16 pwd[3] = {0, 0, 0};
struct ethtool_wolinfo *wol_conf = wol;
- mutex_lock(&phydev->lock);
rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
- if (rc < 0) {
- rc = phy_restore_page(phydev, rc, rc);
- goto out_unlock;
- }
+ if (rc < 0)
+ return phy_restore_page(phydev, rc, rc);
if (wol->wolopts & WAKE_MAGIC) {
/* Store the device address for the magic packet */
@@ -323,7 +320,7 @@ static int vsc85xx_wol_set(struct phy_device *phydev,
rc = phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
if (rc < 0)
- goto out_unlock;
+ return rc;
if (wol->wolopts & WAKE_MAGIC) {
/* Enable the WOL interrupt */
@@ -331,22 +328,19 @@ static int vsc85xx_wol_set(struct phy_device *phydev,
reg_val |= MII_VSC85XX_INT_MASK_WOL;
rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val);
if (rc)
- goto out_unlock;
+ return rc;
} else {
/* Disable the WOL interrupt */
reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK);
reg_val &= (~MII_VSC85XX_INT_MASK_WOL);
rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val);
if (rc)
- goto out_unlock;
+ return rc;
}
/* Clear WOL iterrupt status */
reg_val = phy_read(phydev, MII_VSC85XX_INT_STATUS);
-out_unlock:
- mutex_unlock(&phydev->lock);
-
- return rc;
+ return 0;
}
static void vsc85xx_wol_get(struct phy_device *phydev,
@@ -358,10 +352,9 @@ static void vsc85xx_wol_get(struct phy_device *phydev,
u16 pwd[3] = {0, 0, 0};
struct ethtool_wolinfo *wol_conf = wol;
- mutex_lock(&phydev->lock);
rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
if (rc < 0)
- goto out_unlock;
+ goto out_restore_page;
reg_val = __phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
if (reg_val & SECURE_ON_ENABLE)
@@ -377,9 +370,8 @@ static void vsc85xx_wol_get(struct phy_device *phydev,
}
}
-out_unlock:
+out_restore_page:
phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
- mutex_unlock(&phydev->lock);
}
#if IS_ENABLED(CONFIG_OF_MDIO)
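[review note] The vsc85xx WoL hunks drop the driver's own phydev->lock around the paged accesses, presumably because the ethtool WoL entry points now hold that lock, and rely solely on the phy_select_page()/phy_restore_page() pairing, which serializes on the MDIO bus lock. A minimal sketch of that pairing, not tied to this driver:

    #include <linux/phy.h>

    static int example_paged_read(struct phy_device *phydev, int page, u32 reg)
    {
            int oldpage, ret = 0;

            oldpage = phy_select_page(phydev, page);
            if (oldpage >= 0)
                    ret = __phy_read(phydev, reg);

            /* restores the original page and releases the MDIO bus lock;
             * returns the selection error if phy_select_page() failed
             */
            return phy_restore_page(phydev, oldpage, ret);
    }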
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 047c581457e3..5813b07242ce 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -79,7 +79,7 @@
#define SGMII_ABILITY BIT(0)
#define VEND1_MII_BASIC_CONFIG 0xAFC6
-#define MII_BASIC_CONFIG_REV BIT(8)
+#define MII_BASIC_CONFIG_REV BIT(4)
#define MII_BASIC_CONFIG_SGMII 0x9
#define MII_BASIC_CONFIG_RGMII 0x7
#define MII_BASIC_CONFIG_RMII 0x5
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index b33e55a7364e..99a07eb54c44 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -57,6 +57,18 @@ static const char *phy_state_to_str(enum phy_state st)
return NULL;
}
+static void phy_process_state_change(struct phy_device *phydev,
+ enum phy_state old_state)
+{
+ if (old_state != phydev->state) {
+ phydev_dbg(phydev, "PHY state change %s -> %s\n",
+ phy_state_to_str(old_state),
+ phy_state_to_str(phydev->state));
+ if (phydev->drv && phydev->drv->link_change_notify)
+ phydev->drv->link_change_notify(phydev);
+ }
+}
+
static void phy_link_up(struct phy_device *phydev)
{
phydev->phy_link_change(phydev, true);
@@ -1301,6 +1313,7 @@ EXPORT_SYMBOL(phy_free_interrupt);
void phy_stop(struct phy_device *phydev)
{
struct net_device *dev = phydev->attached_dev;
+ enum phy_state old_state;
if (!phy_is_started(phydev) && phydev->state != PHY_DOWN) {
WARN(1, "called from state %s\n",
@@ -1309,6 +1322,7 @@ void phy_stop(struct phy_device *phydev)
}
mutex_lock(&phydev->lock);
+ old_state = phydev->state;
if (phydev->state == PHY_CABLETEST) {
phy_abort_cable_test(phydev);
@@ -1319,6 +1333,7 @@ void phy_stop(struct phy_device *phydev)
sfp_upstream_stop(phydev->sfp_bus);
phydev->state = PHY_HALTED;
+ phy_process_state_change(phydev, old_state);
mutex_unlock(&phydev->lock);
@@ -1436,13 +1451,7 @@ void phy_state_machine(struct work_struct *work)
if (err < 0)
phy_error(phydev);
- if (old_state != phydev->state) {
- phydev_dbg(phydev, "PHY state change %s -> %s\n",
- phy_state_to_str(old_state),
- phy_state_to_str(phydev->state));
- if (phydev->drv && phydev->drv->link_change_notify)
- phydev->drv->link_change_notify(phydev);
- }
+ phy_process_state_change(phydev, old_state);
/* Only re-schedule a PHY state machine change if we are polling the
* PHY, if PHY_MAC_INTERRUPT is set, then we will be moving
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 3f8a64fb9d71..1785f1cead97 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -3098,8 +3098,6 @@ static int phy_probe(struct device *dev)
if (phydrv->flags & PHY_IS_INTERNAL)
phydev->is_internal = true;
- mutex_lock(&phydev->lock);
-
/* Deassert the reset signal */
phy_device_reset(phydev, 0);
@@ -3146,7 +3144,7 @@ static int phy_probe(struct device *dev)
*/
err = genphy_c45_read_eee_adv(phydev, phydev->advertising_eee);
if (err)
- return err;
+ goto out;
/* There is no "enabled" flag. If PHY is advertising, assume it is
* kind of enabled.
@@ -3188,12 +3186,10 @@ static int phy_probe(struct device *dev)
phydev->state = PHY_READY;
out:
- /* Assert the reset signal */
+ /* Re-assert the reset signal on error */
if (err)
phy_device_reset(phydev, 1);
- mutex_unlock(&phydev->lock);
-
return err;
}
@@ -3203,9 +3199,7 @@ static int phy_remove(struct device *dev)
cancel_delayed_work_sync(&phydev->state_queue);
- mutex_lock(&phydev->lock);
phydev->state = PHY_DOWN;
- mutex_unlock(&phydev->lock);
sfp_bus_del_upstream(phydev->sfp_bus);
phydev->sfp_bus = NULL;
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index c02cad6478a8..fb98db61e06c 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -2190,6 +2190,11 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
break;
}
+ /* Force a poll to re-read the hardware signal state after
+ * sfp_sm_mod_probe() changed state_hw_mask.
+ */
+ mod_delayed_work(system_wq, &sfp->poll, 1);
+
err = sfp_hwmon_insert(sfp);
if (err)
dev_warn(sfp->dev, "hwmon probe failed: %pe\n",
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index ac7481ce2fc1..df2c5435c5c4 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -44,7 +44,6 @@ static struct smsc_hw_stat smsc_hw_stats[] = {
};
struct smsc_phy_priv {
- u16 intmask;
bool energy_enable;
};
@@ -57,7 +56,6 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
static int smsc_phy_config_intr(struct phy_device *phydev)
{
- struct smsc_phy_priv *priv = phydev->priv;
int rc;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
@@ -65,14 +63,9 @@ static int smsc_phy_config_intr(struct phy_device *phydev)
if (rc)
return rc;
- priv->intmask = MII_LAN83C185_ISF_INT4 | MII_LAN83C185_ISF_INT6;
- if (priv->energy_enable)
- priv->intmask |= MII_LAN83C185_ISF_INT7;
-
- rc = phy_write(phydev, MII_LAN83C185_IM, priv->intmask);
+ rc = phy_write(phydev, MII_LAN83C185_IM,
+ MII_LAN83C185_ISF_INT_PHYLIB_EVENTS);
} else {
- priv->intmask = 0;
-
rc = phy_write(phydev, MII_LAN83C185_IM, 0);
if (rc)
return rc;
@@ -85,7 +78,6 @@ static int smsc_phy_config_intr(struct phy_device *phydev)
static irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev)
{
- struct smsc_phy_priv *priv = phydev->priv;
int irq_status;
irq_status = phy_read(phydev, MII_LAN83C185_ISF);
@@ -96,7 +88,7 @@ static irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev)
return IRQ_NONE;
}
- if (!(irq_status & priv->intmask))
+ if (!(irq_status & MII_LAN83C185_ISF_INT_PHYLIB_EVENTS))
return IRQ_NONE;
phy_trigger_machine(phydev);
@@ -207,8 +199,11 @@ static int lan95xx_config_aneg_ext(struct phy_device *phydev)
static int lan87xx_read_status(struct phy_device *phydev)
{
struct smsc_phy_priv *priv = phydev->priv;
+ int err;
- int err = genphy_read_status(phydev);
+ err = genphy_read_status(phydev);
+ if (err)
+ return err;
if (!phydev->link && priv->energy_enable && phydev->irq == PHY_POLL) {
/* Disable EDPD to wake up PHY */
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 743cbf5d662c..f7cff58fe044 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -666,8 +666,9 @@ static int asix_resume(struct usb_interface *intf)
static int ax88772_init_mdio(struct usbnet *dev)
{
struct asix_common_private *priv = dev->driver_priv;
+ int ret;
- priv->mdio = devm_mdiobus_alloc(&dev->udev->dev);
+ priv->mdio = mdiobus_alloc();
if (!priv->mdio)
return -ENOMEM;
@@ -679,7 +680,20 @@ static int ax88772_init_mdio(struct usbnet *dev)
snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
dev->udev->bus->busnum, dev->udev->devnum);
- return devm_mdiobus_register(&dev->udev->dev, priv->mdio);
+ ret = mdiobus_register(priv->mdio);
+ if (ret) {
+ netdev_err(dev->net, "Could not register MDIO bus (err %d)\n", ret);
+ mdiobus_free(priv->mdio);
+ priv->mdio = NULL;
+ }
+
+ return ret;
+}
+
+static void ax88772_mdio_unregister(struct asix_common_private *priv)
+{
+ mdiobus_unregister(priv->mdio);
+ mdiobus_free(priv->mdio);
}
static int ax88772_init_phy(struct usbnet *dev)
@@ -896,16 +910,23 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
ret = ax88772_init_mdio(dev);
if (ret)
- return ret;
+ goto mdio_err;
ret = ax88772_phylink_setup(dev);
if (ret)
- return ret;
+ goto phylink_err;
ret = ax88772_init_phy(dev);
if (ret)
- phylink_destroy(priv->phylink);
+ goto initphy_err;
+ return 0;
+
+initphy_err:
+ phylink_destroy(priv->phylink);
+phylink_err:
+ ax88772_mdio_unregister(priv);
+mdio_err:
return ret;
}
@@ -926,6 +947,7 @@ static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
phylink_disconnect_phy(priv->phylink);
rtnl_unlock();
phylink_destroy(priv->phylink);
+ ax88772_mdio_unregister(priv);
asix_rx_fixup_common_free(dev->driver_priv);
}
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index c89639381eca..cd4083e0b3b9 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -665,6 +665,11 @@ static const struct usb_device_id mbim_devs[] = {
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},
+ /* Telit FE990 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1081, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
+ },
+
/* default entry */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_zlp,
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index f18ab8e220db..c458c030fadf 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2115,33 +2115,8 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)
static void lan78xx_link_status_change(struct net_device *net)
{
struct phy_device *phydev = net->phydev;
- int temp;
-
- /* At forced 100 F/H mode, chip may fail to set mode correctly
- * when cable is switched between long(~50+m) and short one.
- * As workaround, set to 10 before setting to 100
- * at forced 100 F/H mode.
- */
- if (!phydev->autoneg && (phydev->speed == 100)) {
- /* disable phy interrupt */
- temp = phy_read(phydev, LAN88XX_INT_MASK);
- temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
- phy_write(phydev, LAN88XX_INT_MASK, temp);
- temp = phy_read(phydev, MII_BMCR);
- temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
- phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
- temp |= BMCR_SPEED100;
- phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
-
- /* clear pending interrupt generated while workaround */
- temp = phy_read(phydev, LAN88XX_INT_STS);
-
- /* enable phy interrupt back */
- temp = phy_read(phydev, LAN88XX_INT_MASK);
- temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
- phy_write(phydev, LAN88XX_INT_MASK, temp);
- }
+ phy_print_status(phydev);
}
static int irq_map(struct irq_domain *d, unsigned int irq,
@@ -3604,13 +3579,29 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
+ if (unlikely(size > skb->len)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "size err rx_cmd_a=0x%08x\n",
+ rx_cmd_a);
+ return 0;
+ }
+
if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
netif_dbg(dev, rx_err, dev->net,
"Error rx_cmd_a=0x%08x", rx_cmd_a);
} else {
- u32 frame_len = size - ETH_FCS_LEN;
+ u32 frame_len;
struct sk_buff *skb2;
+ if (unlikely(size < ETH_FCS_LEN)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "size err rx_cmd_a=0x%08x\n",
+ rx_cmd_a);
+ return 0;
+ }
+
+ frame_len = size - ETH_FCS_LEN;
+
skb2 = napi_alloc_skb(&dev->napi, frame_len);
if (!skb2)
return 0;
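[review note] The lan78xx RX hunk validates the hardware-reported length both against the data actually received (size > skb->len) and against the minimum needed to strip the FCS, before frame_len is computed; the smsc75xx and smsc95xx hunks below add the same upper-bound check. A minimal sketch of the validation, with hypothetical names:

    #include <linux/etherdevice.h>
    #include <linux/skbuff.h>

    static int example_rx_validate(struct sk_buff *skb, u32 size)
    {
            if (size > skb->len)            /* device claims more than we got */
                    return -EINVAL;
            if (size < ETH_FCS_LEN)         /* too short to even carry an FCS */
                    return -EINVAL;

            /* frame_len = size - ETH_FCS_LEN is now safe (no underflow) */
            return 0;
    }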
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 7a2b0094de51..2894114858a2 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -62,12 +62,6 @@ pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index)
}
static inline int
-pl_clear_QuickLink_features(struct usbnet *dev, int val)
-{
- return pl_vendor_req(dev, 1, (u8) val, 0);
-}
-
-static inline int
pl_set_QuickLink_features(struct usbnet *dev, int val)
{
return pl_vendor_req(dev, 3, (u8) val, 0);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index a808d718c012..571e37e67f9c 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1364,6 +1364,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 95de452ff4da..5d6454fedb3f 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -2200,6 +2200,13 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING;
align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
+ if (unlikely(size > skb->len)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "size err rx_cmd_a=0x%08x\n",
+ rx_cmd_a);
+ return 0;
+ }
+
if (unlikely(rx_cmd_a & RX_CMD_A_RED)) {
netif_dbg(dev, rx_err, dev->net,
"Error rx_cmd_a=0x%08x\n", rx_cmd_a);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 32d2c60d334d..563ecd27b93e 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1833,6 +1833,12 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
size = (u16)((header & RX_STS_FL_) >> 16);
align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4;
+ if (unlikely(size > skb->len)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "size err header=0x%08x\n", header);
+ return 0;
+ }
+
if (unlikely(header & RX_STS_ES_)) {
netif_dbg(dev, rx_err, dev->net,
"Error header=0x%08x\n", header);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 1bb54de7124d..c1178915496d 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -708,7 +708,8 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
u32 frame_sz;
if (skb_shared(skb) || skb_head_is_locked(skb) ||
- skb_shinfo(skb)->nr_frags) {
+ skb_shinfo(skb)->nr_frags ||
+ skb_headroom(skb) < XDP_PACKET_HEADROOM) {
u32 size, len, max_head_size, off;
struct sk_buff *nskb;
struct page *page;
@@ -773,9 +774,6 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
consume_skb(skb);
skb = nskb;
- } else if (skb_headroom(skb) < XDP_PACKET_HEADROOM &&
- pskb_expand_head(skb, VETH_XDP_HEADROOM, 0, GFP_ATOMIC)) {
- goto drop;
}
/* SKB "head" area always have tailroom for skb_shared_info */
@@ -1257,6 +1255,26 @@ static int veth_enable_range_safe(struct net_device *dev, int start, int end)
return 0;
}
+static void veth_set_xdp_features(struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ struct net_device *peer;
+
+ peer = rtnl_dereference(priv->peer);
+ if (peer && peer->real_num_tx_queues <= dev->real_num_rx_queues) {
+ xdp_features_t val = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_RX_SG;
+
+ if (priv->_xdp_prog || veth_gro_requested(dev))
+ val |= NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
+ xdp_set_features_flag(dev, val);
+ } else {
+ xdp_clear_features_flag(dev);
+ }
+}
+
static int veth_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
@@ -1323,6 +1341,12 @@ out:
if (peer)
netif_carrier_on(peer);
}
+
+ /* update XDP supported features */
+ veth_set_xdp_features(dev);
+ if (peer)
+ veth_set_xdp_features(peer);
+
return err;
revert:
@@ -1489,7 +1513,10 @@ static int veth_set_features(struct net_device *dev,
err = veth_napi_enable(dev);
if (err)
return err;
+
+ xdp_features_set_redirect_target(dev, true);
} else {
+ xdp_features_clear_redirect_target(dev);
veth_napi_del(dev);
}
return 0;
@@ -1570,10 +1597,15 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
peer->max_mtu = max_mtu;
}
+
+ xdp_features_set_redirect_target(dev, true);
}
if (old_prog) {
if (!prog) {
+ if (!veth_gro_requested(dev))
+ xdp_features_clear_redirect_target(dev);
+
if (dev->flags & IFF_UP)
veth_disable_xdp(dev);
@@ -1610,7 +1642,7 @@ static int veth_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
struct veth_xdp_buff *_ctx = (void *)ctx;
if (!_ctx->skb)
- return -EOPNOTSUPP;
+ return -ENODATA;
*timestamp = skb_hwtstamps(_ctx->skb)->hwtstamp;
return 0;
@@ -1621,7 +1653,7 @@ static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
struct veth_xdp_buff *_ctx = (void *)ctx;
if (!_ctx->skb)
- return -EOPNOTSUPP;
+ return -ENODATA;
*hash = skb_get_hash(_ctx->skb);
return 0;
@@ -1686,10 +1718,6 @@ static void veth_setup(struct net_device *dev)
dev->hw_enc_features = VETH_FEATURES;
dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
netif_set_tso_max_size(dev, GSO_MAX_SIZE);
-
- dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
- NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
- NETDEV_XDP_ACT_NDO_XMIT_SG;
}
/*
@@ -1857,6 +1885,10 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
goto err_queues;
veth_disable_gro(dev);
+ /* update XDP supported features */
+ veth_set_xdp_features(dev);
+ veth_set_xdp_features(peer);
+
return 0;
err_queues:
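[review note] veth_set_xdp_features() advertises XDP capabilities dynamically instead of the fixed mask removed from veth_setup(): the base/redirect/RX-SG bits depend on the peer's queue layout, and the NDO_XMIT bits are only claimed when an XDP program or GRO provides NAPI on the receive side. A small sketch of the flag helpers used in the hunk above, with a hypothetical condition:

    #include <linux/netdevice.h>
    #include <net/xdp.h>

    static void example_update_xdp_caps(struct net_device *dev, bool can_ndo_xmit)
    {
            xdp_features_t val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;

            if (can_ndo_xmit)
                    val |= NETDEV_XDP_ACT_NDO_XMIT;

            xdp_set_features_flag(dev, val);        /* replaces the whole mask */
    }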
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index fb5e68ed3ec2..2396c28c0122 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -446,7 +446,8 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
struct receive_queue *rq,
struct page *page, unsigned int offset,
- unsigned int len, unsigned int truesize)
+ unsigned int len, unsigned int truesize,
+ unsigned int headroom)
{
struct sk_buff *skb;
struct virtio_net_hdr_mrg_rxbuf *hdr;
@@ -464,11 +465,11 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
else
hdr_padded_len = sizeof(struct padded_vnet_hdr);
- buf = p;
+ buf = p - headroom;
len -= hdr_len;
offset += hdr_padded_len;
p += hdr_padded_len;
- tailroom = truesize - hdr_padded_len - len;
+ tailroom = truesize - headroom - hdr_padded_len - len;
shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -545,6 +546,87 @@ ok:
return skb;
}
+static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
+{
+ unsigned int len;
+ unsigned int packets = 0;
+ unsigned int bytes = 0;
+ void *ptr;
+
+ while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+ if (likely(!is_xdp_frame(ptr))) {
+ struct sk_buff *skb = ptr;
+
+ pr_debug("Sent skb %p\n", skb);
+
+ bytes += skb->len;
+ napi_consume_skb(skb, in_napi);
+ } else {
+ struct xdp_frame *frame = ptr_to_xdp(ptr);
+
+ bytes += xdp_get_frame_len(frame);
+ xdp_return_frame(frame);
+ }
+ packets++;
+ }
+
+ /* Avoid overhead when no packets have been processed
+ * happens when called speculatively from start_xmit.
+ */
+ if (!packets)
+ return;
+
+ u64_stats_update_begin(&sq->stats.syncp);
+ sq->stats.bytes += bytes;
+ sq->stats.packets += packets;
+ u64_stats_update_end(&sq->stats.syncp);
+}
+
+static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
+{
+ if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
+ return false;
+ else if (q < vi->curr_queue_pairs)
+ return true;
+ else
+ return false;
+}
+
+static void check_sq_full_and_disable(struct virtnet_info *vi,
+ struct net_device *dev,
+ struct send_queue *sq)
+{
+ bool use_napi = sq->napi.weight;
+ int qnum;
+
+ qnum = sq - vi->sq;
+
+ /* If running out of space, stop queue to avoid getting packets that we
+ * are then unable to transmit.
+ * An alternative would be to force queuing layer to requeue the skb by
+ * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
+ * returned in a normal path of operation: it means that driver is not
+ * maintaining the TX queue stop/start state properly, and causes
+ * the stack to do a non-trivial amount of useless work.
+ * Since most packets only take 1 or 2 ring slots, stopping the queue
+ * early means 16 slots are typically wasted.
+ */
+ if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
+ netif_stop_subqueue(dev, qnum);
+ if (use_napi) {
+ if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
+ virtqueue_napi_schedule(&sq->napi, sq->vq);
+ } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
+ /* More just got used, free them then recheck. */
+ free_old_xmit_skbs(sq, false);
+ if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
+ netif_start_subqueue(dev, qnum);
+ virtqueue_disable_cb(sq->vq);
+ }
+ }
+ }
+}
+
static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
struct send_queue *sq,
struct xdp_frame *xdpf)
@@ -686,6 +768,9 @@ static int virtnet_xdp_xmit(struct net_device *dev,
}
ret = nxmit;
+ if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
+ check_sq_full_and_disable(vi, dev, sq);
+
if (flags & XDP_XMIT_FLUSH) {
if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
kicks = 1;
@@ -925,7 +1010,7 @@ static struct sk_buff *receive_big(struct net_device *dev,
{
struct page *page = buf;
struct sk_buff *skb =
- page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
+ page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
stats->bytes += len - vi->hdr_len;
if (unlikely(!skb))
@@ -1188,9 +1273,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
switch (act) {
case XDP_PASS:
+ head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
+ if (unlikely(!head_skb))
+ goto err_xdp_frags;
+
if (unlikely(xdp_page != page))
put_page(page);
- head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
rcu_read_unlock();
return head_skb;
case XDP_TX:
@@ -1248,7 +1336,7 @@ err_xdp_frags:
rcu_read_unlock();
skip_xdp:
- head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
+ head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
curr_skb = head_skb;
if (unlikely(!curr_skb))
@@ -1714,52 +1802,6 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
return stats.packets;
}
-static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
-{
- unsigned int len;
- unsigned int packets = 0;
- unsigned int bytes = 0;
- void *ptr;
-
- while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
- if (likely(!is_xdp_frame(ptr))) {
- struct sk_buff *skb = ptr;
-
- pr_debug("Sent skb %p\n", skb);
-
- bytes += skb->len;
- napi_consume_skb(skb, in_napi);
- } else {
- struct xdp_frame *frame = ptr_to_xdp(ptr);
-
- bytes += xdp_get_frame_len(frame);
- xdp_return_frame(frame);
- }
- packets++;
- }
-
- /* Avoid overhead when no packets have been processed
- * happens when called speculatively from start_xmit.
- */
- if (!packets)
- return;
-
- u64_stats_update_begin(&sq->stats.syncp);
- sq->stats.bytes += bytes;
- sq->stats.packets += packets;
- u64_stats_update_end(&sq->stats.syncp);
-}
-
-static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
-{
- if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
- return false;
- else if (q < vi->curr_queue_pairs)
- return true;
- else
- return false;
-}
-
static void virtnet_poll_cleantx(struct receive_queue *rq)
{
struct virtnet_info *vi = rq->vq->vdev->priv;
@@ -1989,30 +2031,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
nf_reset_ct(skb);
}
- /* If running out of space, stop queue to avoid getting packets that we
- * are then unable to transmit.
- * An alternative would be to force queuing layer to requeue the skb by
- * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
- * returned in a normal path of operation: it means that driver is not
- * maintaining the TX queue stop/start state properly, and causes
- * the stack to do a non-trivial amount of useless work.
- * Since most packets only take 1 or 2 ring slots, stopping the queue
- * early means 16 slots are typically wasted.
- */
- if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
- netif_stop_subqueue(dev, qnum);
- if (use_napi) {
- if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
- virtqueue_napi_schedule(&sq->napi, sq->vq);
- } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
- /* More just got used, free them then recheck. */
- free_old_xmit_skbs(sq, false);
- if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
- netif_start_subqueue(dev, qnum);
- virtqueue_disable_cb(sq->vq);
- }
- }
- }
+ check_sq_full_and_disable(vi, dev, sq);
if (kick || netif_xmit_stopped(txq)) {
if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 1c53b5546927..47c2ad7a3e42 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -1177,14 +1177,9 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
uhdlc_priv->dev = &pdev->dev;
uhdlc_priv->ut_info = ut_info;
- if (of_get_property(np, "fsl,tdm-interface", NULL))
- uhdlc_priv->tsa = 1;
-
- if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
- uhdlc_priv->loopback = 1;
-
- if (of_get_property(np, "fsl,hdlc-bus", NULL))
- uhdlc_priv->hdlc_bus = 1;
+ uhdlc_priv->tsa = of_property_read_bool(np, "fsl,tdm-interface");
+ uhdlc_priv->loopback = of_property_read_bool(np, "fsl,ucc-internal-loopback");
+ uhdlc_priv->hdlc_bus = of_property_read_bool(np, "fsl,hdlc-bus");
if (uhdlc_priv->tsa == 1) {
utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index 583adb37ee1e..125284b346a7 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -106,7 +106,7 @@ static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
unsigned int cpu = *stored_cpu, cpu_index, i;
- if (unlikely(cpu == nr_cpumask_bits ||
+ if (unlikely(cpu >= nr_cpu_ids ||
!cpumask_test_cpu(cpu, cpu_online_mask))) {
cpu_index = id % cpumask_weight(cpu_online_mask);
cpu = cpumask_first(cpu_online_mask);
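[review note] The queueing.h fix replaces the `cpu == nr_cpumask_bits` sentinel test with `cpu >= nr_cpu_ids`, treating any stored CPU number outside the range of possible CPUs as invalid. A minimal sketch of a round-robin pick over online CPUs using the same guards (hypothetical helper, mirroring the context shown above):

    #include <linux/cpumask.h>

    static unsigned int example_choose_online_cpu(int stored, unsigned int id)
    {
            unsigned int cpu = stored, index, i;

            if (cpu >= nr_cpu_ids || !cpumask_test_cpu(cpu, cpu_online_mask)) {
                    index = id % cpumask_weight(cpu_online_mask);
                    cpu = cpumask_first(cpu_online_mask);
                    for (i = 0; i < index; i++)
                            cpu = cpumask_next(cpu, cpu_online_mask);
            }
            return cpu;
    }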
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 565522466eba..b55b1b17f4d1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -732,7 +732,10 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
rcu_read_lock();
do {
- while (likely(!mvmtxq->stopped &&
+ while (likely(!test_bit(IWL_MVM_TXQ_STATE_STOP_FULL,
+ &mvmtxq->state) &&
+ !test_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT,
+ &mvmtxq->state) &&
!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) {
skb = ieee80211_tx_dequeue(hw, txq);
@@ -757,42 +760,25 @@ static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
- /*
- * Please note that racing is handled very carefully here:
- * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is
- * deleted afterwards.
- * This means that if:
- * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list):
- * queue is allocated and we can TX.
- * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list):
- * a race, should defer the frame.
- * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list):
- * need to allocate the queue and defer the frame.
- * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list):
- * queue is already scheduled for allocation, no need to allocate,
- * should defer the frame.
- */
-
- /* If the queue is allocated TX and return. */
- if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
- /*
- * Check that list is empty to avoid a race where txq_id is
- * already updated, but the queue allocation work wasn't
- * finished
- */
- if (unlikely(txq->sta && !list_empty(&mvmtxq->list)))
- return;
-
+ if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
+ !txq->sta) {
iwl_mvm_mac_itxq_xmit(hw, txq);
return;
}
- /* The list is being deleted only after the queue is fully allocated. */
- if (!list_empty(&mvmtxq->list))
- return;
+ /* iwl_mvm_mac_itxq_xmit() will later be called by the worker
+ * to handle any packets we leave on the txq now
+ */
- list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
- schedule_work(&mvm->add_stream_wk);
+ spin_lock_bh(&mvm->add_stream_lock);
+ /* The list is being deleted only after the queue is fully allocated. */
+ if (list_empty(&mvmtxq->list) &&
+ /* recheck under lock */
+ !test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) {
+ list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
+ schedule_work(&mvm->add_stream_wk);
+ }
+ spin_unlock_bh(&mvm->add_stream_lock);
}
#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 90bc95d96a78..f307c345dfa0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -729,7 +729,10 @@ struct iwl_mvm_txq {
struct list_head list;
u16 txq_id;
atomic_t tx_request;
- bool stopped;
+#define IWL_MVM_TXQ_STATE_STOP_FULL 0
+#define IWL_MVM_TXQ_STATE_STOP_REDIRECT 1
+#define IWL_MVM_TXQ_STATE_READY 2
+ unsigned long state;
};
static inline struct iwl_mvm_txq *
@@ -827,6 +830,7 @@ struct iwl_mvm {
struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES];
};
struct work_struct add_stream_wk; /* To add streams to queues */
+ spinlock_t add_stream_lock;
const char *nvm_file_name;
struct iwl_nvm_data *nvm_data;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index f4e9446d9dc2..9711841bb456 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1195,6 +1195,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
INIT_LIST_HEAD(&mvm->add_stream_txqs);
+ spin_lock_init(&mvm->add_stream_lock);
init_waitqueue_head(&mvm->rx_sync_waitq);
@@ -1691,7 +1692,10 @@ static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
txq = sta->txq[tid];
mvmtxq = iwl_mvm_txq_from_mac80211(txq);
- mvmtxq->stopped = !start;
+ if (start)
+ clear_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
+ else
+ set_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 69634fb82a9b..9caae77995ca 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -384,8 +384,11 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_mvm_txq *mvmtxq =
iwl_mvm_txq_from_tid(sta, tid);
- mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ spin_lock_bh(&mvm->add_stream_lock);
list_del_init(&mvmtxq->list);
+ clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+ mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ spin_unlock_bh(&mvm->add_stream_lock);
}
/* Regardless if this is a reserved TXQ for a STA - mark it as false */
@@ -479,8 +482,11 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
disable_agg_tids |= BIT(tid);
mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
- mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ spin_lock_bh(&mvm->add_stream_lock);
list_del_init(&mvmtxq->list);
+ clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+ mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ spin_unlock_bh(&mvm->add_stream_lock);
}
mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
@@ -693,7 +699,7 @@ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
queue, iwl_mvm_ac_to_tx_fifo[ac]);
/* Stop the queue and wait for it to empty */
- txq->stopped = true;
+ set_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);
ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
if (ret) {
@@ -736,7 +742,7 @@ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
out:
/* Continue using the queue */
- txq->stopped = false;
+ clear_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);
return ret;
}
@@ -1444,12 +1450,22 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
* a queue in the function itself.
*/
if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
+ spin_lock_bh(&mvm->add_stream_lock);
list_del_init(&mvmtxq->list);
+ spin_unlock_bh(&mvm->add_stream_lock);
continue;
}
- list_del_init(&mvmtxq->list);
+ /* now we're ready, any remaining races/concurrency will be
+ * handled in iwl_mvm_mac_itxq_xmit()
+ */
+ set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+
local_bh_disable();
+ spin_lock(&mvm->add_stream_lock);
+ list_del_init(&mvmtxq->list);
+ spin_unlock(&mvm->add_stream_lock);
+
iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
local_bh_enable();
}
@@ -1864,8 +1880,11 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
struct iwl_mvm_txq *mvmtxq =
iwl_mvm_txq_from_mac80211(sta->txq[i]);
+ spin_lock_bh(&mvm->add_stream_lock);
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
list_del_init(&mvmtxq->list);
+ clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+ spin_unlock_bh(&mvm->add_stream_lock);
}
}
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 5dcf61761a16..9a698a16a8f3 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -172,7 +172,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
.can_ext_scan = true,
};
-static const struct of_device_id mwifiex_pcie_of_match_table[] = {
+static const struct of_device_id mwifiex_pcie_of_match_table[] __maybe_unused = {
{ .compatible = "pci11ab,2b42" },
{ .compatible = "pci1b4b,2b42" },
{ }
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index c64e24c10ea6..a24bd40dd41a 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -495,7 +495,7 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
{"EXTLAST", NULL, 0, 0xFE},
};
-static const struct of_device_id mwifiex_sdio_of_match_table[] = {
+static const struct of_device_id mwifiex_sdio_of_match_table[] __maybe_unused = {
{ .compatible = "marvell,sd8787" },
{ .compatible = "marvell,sd8897" },
{ .compatible = "marvell,sd8978" },
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index b117e4467c87..34abf70f44af 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -539,6 +539,7 @@ int mt76_register_phy(struct mt76_phy *phy, bool vht,
if (ret)
return ret;
+ set_bit(MT76_STATE_REGISTERED, &phy->state);
phy->dev->phys[phy->band_idx] = phy;
return 0;
@@ -549,6 +550,9 @@ void mt76_unregister_phy(struct mt76_phy *phy)
{
struct mt76_dev *dev = phy->dev;
+ if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
+ return;
+
if (IS_ENABLED(CONFIG_MT76_LEDS))
mt76_led_cleanup(phy);
mt76_tx_status_check(dev, true);
@@ -719,6 +723,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
return ret;
WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
+ set_bit(MT76_STATE_REGISTERED, &phy->state);
sched_set_fifo_low(dev->tx_worker.task);
return 0;
@@ -729,6 +734,9 @@ void mt76_unregister_device(struct mt76_dev *dev)
{
struct ieee80211_hw *hw = dev->hw;
+ if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
+ return;
+
if (IS_ENABLED(CONFIG_MT76_LEDS))
mt76_led_cleanup(&dev->phy);
mt76_tx_status_check(dev, true);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index ccca0162c8f8..183b0fc5a2d4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -402,6 +402,7 @@ struct mt76_tx_cb {
enum {
MT76_STATE_INITIALIZED,
+ MT76_STATE_REGISTERED,
MT76_STATE_RUNNING,
MT76_STATE_MCU_RUNNING,
MT76_SCANNING,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index efb9bfaa187f..008ece1b16f8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -1221,6 +1221,9 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba_tlv);
int mt76_connac_mcu_sta_wed_update(struct mt76_dev *dev, struct sk_buff *skb)
{
+ if (!mt76_is_mmio(dev))
+ return 0;
+
if (!mtk_wed_device_active(&dev->mmio.wed))
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index 1ab768feccaa..5e288116b1b0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -383,7 +383,6 @@ mt7915_init_wiphy(struct mt7915_phy *phy)
ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
- ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
hw->max_tx_fragments = 4;
@@ -396,6 +395,9 @@ mt7915_init_wiphy(struct mt7915_phy *phy)
}
if (phy->mt76->cap.has_5ghz) {
+ struct ieee80211_sta_vht_cap *vht_cap;
+
+ vht_cap = &phy->mt76->sband_5g.sband.vht_cap;
phy->mt76->sband_5g.sband.ht_cap.cap |=
IEEE80211_HT_CAP_LDPC_CODING |
IEEE80211_HT_CAP_MAX_AMSDU;
@@ -403,19 +405,28 @@ mt7915_init_wiphy(struct mt7915_phy *phy)
IEEE80211_HT_MPDU_DENSITY_4;
if (is_mt7915(&dev->mt76)) {
- phy->mt76->sband_5g.sband.vht_cap.cap |=
+ vht_cap->cap |=
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+
+ if (!dev->dbdc_support)
+ vht_cap->cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
+ FIELD_PREP(IEEE80211_VHT_CAP_EXT_NSS_BW_MASK, 1);
} else {
- phy->mt76->sband_5g.sband.vht_cap.cap |=
+ vht_cap->cap |=
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
/* mt7916 dbdc with 2g 2x2 bw40 and 5g 2x2 bw160c */
- phy->mt76->sband_5g.sband.vht_cap.cap |=
+ vht_cap->cap |=
IEEE80211_VHT_CAP_SHORT_GI_160 |
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
}
+
+ if (!is_mt7915(&dev->mt76) || !dev->dbdc_support)
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
}
mt76_set_stream_caps(phy->mt76, true);
@@ -841,9 +852,13 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_phy *phy,
int sts = hweight8(phy->mt76->chainmask);
u8 c, sts_160 = sts;
- /* mt7915 doesn't support bw160 */
- if (is_mt7915(&dev->mt76))
- sts_160 = 0;
+ /* Can do 1/2 of STS in 160Mhz mode for mt7915 */
+ if (is_mt7915(&dev->mt76)) {
+ if (!dev->dbdc_support)
+ sts_160 /= 2;
+ else
+ sts_160 = 0;
+ }
#ifdef CONFIG_MAC80211_MESH
if (vif == NL80211_IFTYPE_MESH_POINT)
@@ -944,10 +959,15 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
int i, idx = 0, nss = hweight8(phy->mt76->antenna_mask);
u16 mcs_map = 0;
u16 mcs_map_160 = 0;
- u8 nss_160 = nss;
+ u8 nss_160;
- /* Can't do 160MHz with mt7915 */
- if (is_mt7915(&dev->mt76))
+ if (!is_mt7915(&dev->mt76))
+ nss_160 = nss;
+ else if (!dev->dbdc_support)
+ /* Can do 1/2 of NSS streams in 160Mhz mode for mt7915 */
+ nss_160 = nss / 2;
+ else
+ /* Can't do 160MHz with mt7915 dbdc */
nss_160 = 0;
for (i = 0; i < 8; i++) {
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 2d2edddc77bd..3f88e6a0a510 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -447,8 +447,7 @@ static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue,
dev_info(&spi->dev, "selected chip family is %s\n",
pdev_data->family->name);
- if (of_find_property(dt_node, "clock-xtal", NULL))
- pdev_data->ref_clock_xtal = true;
+ pdev_data->ref_clock_xtal = of_property_read_bool(dt_node, "clock-xtal");
/* optional clock frequency params */
of_property_read_u32(dt_node, "ref-clock-frequency",
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index 2d53e0f88d2f..1e0f2297f9c6 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -247,6 +247,9 @@ static void fdp_nci_i2c_read_device_properties(struct device *dev,
len, sizeof(**fw_vsc_cfg),
GFP_KERNEL);
+ if (!*fw_vsc_cfg)
+ goto alloc_err;
+
r = device_property_read_u8_array(dev, FDP_DP_FW_VSC_CFG_NAME,
*fw_vsc_cfg, len);
@@ -260,6 +263,7 @@ vsc_read_err:
*fw_vsc_cfg = NULL;
}
+alloc_err:
dev_dbg(dev, "Clock type: %d, clock frequency: %d, VSC: %s",
*clock_type, *clock_freq, *fw_vsc_cfg != NULL ? "yes" : "no");
}
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index ed9c5e2cf3ad..a187f0e0b0f7 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -175,6 +175,7 @@ static int pn533_usb_send_frame(struct pn533 *dev,
print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1,
out->data, out->len, false);
+ arg.phy = phy;
init_completion(&arg.done);
cntx = phy->out_urb->context;
phy->out_urb->context = &arg;
diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c
index 755460a73c0d..d2aa9f766738 100644
--- a/drivers/nfc/st-nci/ndlc.c
+++ b/drivers/nfc/st-nci/ndlc.c
@@ -282,13 +282,15 @@ EXPORT_SYMBOL(ndlc_probe);
void ndlc_remove(struct llt_ndlc *ndlc)
{
- st_nci_remove(ndlc->ndev);
-
/* cancel timers */
del_timer_sync(&ndlc->t1_timer);
del_timer_sync(&ndlc->t2_timer);
ndlc->t2_active = false;
ndlc->t1_active = false;
+ /* cancel work */
+ cancel_work_sync(&ndlc->sm_work);
+
+ st_nci_remove(ndlc->ndev);
skb_queue_purge(&ndlc->rcv_q);
skb_queue_purge(&ndlc->send_q);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c2730b116dc6..53ef028596c6 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -781,16 +781,26 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
range = page_address(ns->ctrl->discard_page);
}
- __rq_for_each_bio(bio, req) {
- u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
- u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
-
- if (n < segments) {
- range[n].cattr = cpu_to_le32(0);
- range[n].nlb = cpu_to_le32(nlb);
- range[n].slba = cpu_to_le64(slba);
+ if (queue_max_discard_segments(req->q) == 1) {
+ u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
+ u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);
+
+ range[0].cattr = cpu_to_le32(0);
+ range[0].nlb = cpu_to_le32(nlb);
+ range[0].slba = cpu_to_le64(slba);
+ n = 1;
+ } else {
+ __rq_for_each_bio(bio, req) {
+ u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
+ u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
+
+ if (n < segments) {
+ range[n].cattr = cpu_to_le32(0);
+ range[n].nlb = cpu_to_le32(nlb);
+ range[n].slba = cpu_to_le64(slba);
+ }
+ n++;
}
- n++;
}
if (WARN_ON_ONCE(n != segments)) {
@@ -3053,7 +3063,8 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
else
ctrl->max_zeroes_sectors = 0;
- if (nvme_ctrl_limited_cns(ctrl))
+ if (ctrl->subsys->subtype != NVME_NQN_NVME ||
+ nvme_ctrl_limited_cns(ctrl))
return 0;
id = kzalloc(sizeof(*id), GFP_KERNEL);
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 723e7d5b778f..d24ea2e05156 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -464,7 +464,8 @@ static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
}
-static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd)
+static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd,
+ unsigned issue_flags)
{
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
struct request *req = pdu->req;
@@ -485,17 +486,18 @@ static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd)
blk_rq_unmap_user(req->bio);
blk_mq_free_request(req);
- io_uring_cmd_done(ioucmd, status, result);
+ io_uring_cmd_done(ioucmd, status, result, issue_flags);
}
-static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
+static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
+ unsigned issue_flags)
{
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
if (pdu->bio)
blk_rq_unmap_user(pdu->bio);
- io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result);
+ io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result, issue_flags);
}
static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
@@ -517,7 +519,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
* Otherwise, move the completion to task work.
*/
if (cookie != NULL && blk_rq_is_poll(req))
- nvme_uring_task_cb(ioucmd);
+ nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
else
io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
@@ -539,7 +541,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
* Otherwise, move the completion to task work.
*/
if (cookie != NULL && blk_rq_is_poll(req))
- nvme_uring_task_meta_cb(ioucmd);
+ nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED);
else
io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_meta_cb);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index fc39d01e7b63..9171452e2f6d 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -123,9 +123,8 @@ void nvme_mpath_start_request(struct request *rq)
return;
nvme_req(rq)->flags |= NVME_MPATH_IO_STATS;
- nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0,
- blk_rq_bytes(rq) >> SECTOR_SHIFT,
- req_op(rq), jiffies);
+ nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0, req_op(rq),
+ jiffies);
}
EXPORT_SYMBOL_GPL(nvme_mpath_start_request);
@@ -136,7 +135,8 @@ void nvme_mpath_end_request(struct request *rq)
if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
return;
bdev_end_io_acct(ns->head->disk->part0, req_op(rq),
- nvme_req(rq)->start_time);
+ blk_rq_bytes(rq) >> SECTOR_SHIFT,
+ nvme_req(rq)->start_time);
}
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5b95c94ee40f..b615906263f3 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3073,6 +3073,7 @@ out_dev_unmap:
nvme_dev_unmap(dev);
out_uninit_ctrl:
nvme_uninit_ctrl(&dev->ctrl);
+ nvme_put_ctrl(&dev->ctrl);
return result;
}
@@ -3415,6 +3416,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x2646, 0x501E), /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1f40, 0x1202), /* Netac Technologies Co. NV3000 NVMe SSD */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1f40, 0x5236), /* Netac Technologies Co. NV7000 NVMe SSD */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1e4B, 0x1001), /* MAXIO MAP1001 */
@@ -3435,6 +3438,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 7723a4989524..42c0598c31f2 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -208,6 +208,18 @@ static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}
+static inline void *nvme_tcp_req_cmd_pdu(struct nvme_tcp_request *req)
+{
+ return req->pdu;
+}
+
+static inline void *nvme_tcp_req_data_pdu(struct nvme_tcp_request *req)
+{
+ /* use the pdu space in the back for the data pdu */
+ return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) -
+ sizeof(struct nvme_tcp_data_pdu);
+}
+
static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
{
if (nvme_is_fabrics(req->req.cmd))
@@ -614,7 +626,7 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
{
- struct nvme_tcp_data_pdu *data = req->pdu;
+ struct nvme_tcp_data_pdu *data = nvme_tcp_req_data_pdu(req);
struct nvme_tcp_queue *queue = req->queue;
struct request *rq = blk_mq_rq_from_pdu(req);
u32 h2cdata_sent = req->pdu_len;
@@ -1038,7 +1050,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
{
struct nvme_tcp_queue *queue = req->queue;
- struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+ struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
bool inline_data = nvme_tcp_has_inline_data(req);
u8 hdgst = nvme_tcp_hdgst_len(queue);
int len = sizeof(*pdu) + hdgst - req->offset;
@@ -1077,7 +1089,7 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
{
struct nvme_tcp_queue *queue = req->queue;
- struct nvme_tcp_data_pdu *pdu = req->pdu;
+ struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req);
u8 hdgst = nvme_tcp_hdgst_len(queue);
int len = sizeof(*pdu) - req->offset + hdgst;
int ret;
@@ -2284,7 +2296,7 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
{
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
- struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+ struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype;
int qid = nvme_tcp_queue_id(req->queue);
@@ -2323,7 +2335,7 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
struct request *rq)
{
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
- struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+ struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
struct nvme_command *c = &pdu->cmd;
c->common.flags |= NVME_CMD_SGL_METABUF;
@@ -2343,7 +2355,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
struct request *rq)
{
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
- struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+ struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
struct nvme_tcp_queue *queue = req->queue;
u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
blk_status_t ret;
@@ -2682,6 +2694,15 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
static int __init nvme_tcp_init_module(void)
{
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24);
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_rsp_pdu) != 24);
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_r2t_pdu) != 24);
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_icreq_pdu) != 128);
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128);
+ BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24);
+
nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
if (!nvme_tcp_wq)
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index f66ed13d7c11..3935165048e7 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -756,8 +756,10 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
+ struct nvmet_sq *sq = req->sq;
+
__nvmet_req_complete(req, status);
- percpu_ref_put(&req->sq->ref);
+ percpu_ref_put(&sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 174ef3574e07..22024b830788 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -1231,7 +1231,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
"#nvmem-cell-cells",
index, &cell_spec);
if (ret)
- return ERR_PTR(ret);
+ return ERR_PTR(-ENOENT);
if (cell_spec.args_count > 1)
return ERR_PTR(-EINVAL);
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 83ae838ceb5f..549c4bd5caec 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -76,6 +76,27 @@ struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n)
}
EXPORT_SYMBOL_GPL(pci_bus_resource_n);
+void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res)
+{
+ struct pci_bus_resource *bus_res, *tmp;
+ int i;
+
+ for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
+ if (bus->resource[i] == res) {
+ bus->resource[i] = NULL;
+ return;
+ }
+ }
+
+ list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
+ if (bus_res->res == res) {
+ list_del(&bus_res->list);
+ kfree(bus_res);
+ return;
+ }
+ }
+}
+
void pci_bus_remove_resources(struct pci_bus *bus)
{
int i;
diff --git a/drivers/platform/chrome/cros_ec_chardev.c b/drivers/platform/chrome/cros_ec_chardev.c
index 0de7c255254e..d6de5a294128 100644
--- a/drivers/platform/chrome/cros_ec_chardev.c
+++ b/drivers/platform/chrome/cros_ec_chardev.c
@@ -284,7 +284,7 @@ static long cros_ec_chardev_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
u_cmd.insize > EC_MAX_MSG_BYTES)
return -EINVAL;
- s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
+ s_cmd = kzalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
GFP_KERNEL);
if (!s_cmd)
return -ENOMEM;
diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
index 09c7829e95c4..382793e73a60 100644
--- a/drivers/platform/mellanox/Kconfig
+++ b/drivers/platform/mellanox/Kconfig
@@ -16,17 +16,17 @@ if MELLANOX_PLATFORM
config MLXREG_HOTPLUG
tristate "Mellanox platform hotplug driver support"
- depends on REGMAP
depends on HWMON
depends on I2C
+ select REGMAP
help
This driver handles hot-plug events for the power suppliers, power
cables and fans on the wide range Mellanox IB and Ethernet systems.
config MLXREG_IO
tristate "Mellanox platform register access driver support"
- depends on REGMAP
depends on HWMON
+ select REGMAP
help
This driver allows access to Mellanox programmable device register
space through sysfs interface. The sets of registers for sysfs access
@@ -36,9 +36,9 @@ config MLXREG_IO
config MLXREG_LC
tristate "Mellanox line card platform driver support"
- depends on REGMAP
depends on HWMON
depends on I2C
+ select REGMAP
help
This driver provides support for the Mellanox MSN4800-XX line cards,
which are the part of MSN4800 Ethernet modular switch systems
@@ -80,10 +80,9 @@ config MLXBF_PMC
config NVSW_SN2201
tristate "Nvidia SN2201 platform driver support"
- depends on REGMAP
depends on HWMON
depends on I2C
- depends on REGMAP_I2C
+ select REGMAP_I2C
help
This driver provides support for the Nvidia SN2201 platform.
The SN2201 is a highly integrated for one rack unit system with
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index ec7c2b4e1721..4a01b315e0a9 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -955,7 +955,8 @@ config SERIAL_MULTI_INSTANTIATE
config MLX_PLATFORM
tristate "Mellanox Technologies platform support"
- depends on I2C && REGMAP
+ depends on I2C
+ select REGMAP
help
This option enables system support for the Mellanox Technologies
platform. The Mellanox systems provide data center networking
diff --git a/drivers/platform/x86/amd/pmc.c b/drivers/platform/x86/amd/pmc.c
index ab05b9ee6655..2edaae04a691 100644
--- a/drivers/platform/x86/amd/pmc.c
+++ b/drivers/platform/x86/amd/pmc.c
@@ -171,9 +171,7 @@ MODULE_PARM_DESC(disable_workarounds, "Disable workarounds for platform bugs");
static struct amd_pmc_dev pmc;
static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf);
-#ifdef CONFIG_SUSPEND
static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
-#endif
static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
{
@@ -386,7 +384,6 @@ static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table
return 0;
}
-#ifdef CONFIG_SUSPEND
static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
{
struct smu_metrics table;
@@ -400,7 +397,6 @@ static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
dev_dbg(pdev->dev, "Last suspend in deepest state for %lluus\n",
table.timein_s0i3_lastcapture);
}
-#endif
static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev)
{
@@ -673,7 +669,6 @@ out_unlock:
return rc;
}
-#ifdef CONFIG_SUSPEND
static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
{
switch (dev->cpu_id) {
@@ -861,9 +856,7 @@ static int __maybe_unused amd_pmc_suspend_handler(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(amd_pmc_pm, amd_pmc_suspend_handler, NULL);
-
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmc_pm, amd_pmc_suspend_handler, NULL);
static const struct pci_device_id pmc_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
@@ -905,7 +898,6 @@ static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
return 0;
}
-#ifdef CONFIG_SUSPEND
static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
{
int err;
@@ -926,7 +918,6 @@ static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
return 0;
}
-#endif
static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf)
{
@@ -1017,11 +1008,11 @@ static int amd_pmc_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, dev);
-#ifdef CONFIG_SUSPEND
- err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops);
- if (err)
- dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n");
-#endif
+ if (IS_ENABLED(CONFIG_SUSPEND)) {
+ err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops);
+ if (err)
+ dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n");
+ }
amd_pmc_dbgfs_register(dev);
return 0;
@@ -1035,9 +1026,8 @@ static int amd_pmc_remove(struct platform_device *pdev)
{
struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
-#ifdef CONFIG_SUSPEND
- acpi_unregister_lps0_dev(&amd_pmc_s2idle_dev_ops);
-#endif
+ if (IS_ENABLED(CONFIG_SUSPEND))
+ acpi_unregister_lps0_dev(&amd_pmc_s2idle_dev_ops);
amd_pmc_dbgfs_unregister(dev);
pci_dev_put(dev->rdev);
mutex_destroy(&dev->lock);
@@ -1061,9 +1051,7 @@ static struct platform_driver amd_pmc_driver = {
.name = "amd_pmc",
.acpi_match_table = amd_pmc_acpi_ids,
.dev_groups = pmc_groups,
-#ifdef CONFIG_SUSPEND
- .pm = &amd_pmc_pm,
-#endif
+ .pm = pm_sleep_ptr(&amd_pmc_pm),
},
.probe = amd_pmc_probe,
.remove = amd_pmc_remove,
diff --git a/drivers/platform/x86/dell/dell-wmi-ddv.c b/drivers/platform/x86/dell/dell-wmi-ddv.c
index d547c9d09725..2750dee99c3e 100644
--- a/drivers/platform/x86/dell/dell-wmi-ddv.c
+++ b/drivers/platform/x86/dell/dell-wmi-ddv.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/hwmon.h>
#include <linux/kstrtox.h>
-#include <linux/math.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -96,6 +95,7 @@ struct combined_chip_info {
};
struct dell_wmi_ddv_sensors {
+ bool active;
struct mutex lock; /* protect caching */
unsigned long timestamp;
union acpi_object *obj;
@@ -520,6 +520,9 @@ static struct hwmon_channel_info *dell_wmi_ddv_channel_create(struct device *dev
static void dell_wmi_ddv_hwmon_cache_invalidate(struct dell_wmi_ddv_sensors *sensors)
{
+ if (!sensors->active)
+ return;
+
mutex_lock(&sensors->lock);
kfree(sensors->obj);
sensors->obj = NULL;
@@ -530,6 +533,7 @@ static void dell_wmi_ddv_hwmon_cache_destroy(void *data)
{
struct dell_wmi_ddv_sensors *sensors = data;
+ sensors->active = false;
mutex_destroy(&sensors->lock);
kfree(sensors->obj);
}
@@ -549,6 +553,7 @@ static struct hwmon_channel_info *dell_wmi_ddv_channel_init(struct wmi_device *w
return ERR_PTR(ret);
mutex_init(&sensors->lock);
+ sensors->active = true;
ret = devm_add_action_or_reset(&wdev->dev, dell_wmi_ddv_hwmon_cache_destroy, sensors);
if (ret < 0)
@@ -659,7 +664,8 @@ static ssize_t temp_show(struct device *dev, struct device_attribute *attr, char
if (ret < 0)
return ret;
- return sysfs_emit(buf, "%d\n", DIV_ROUND_CLOSEST(value, 10));
+ /* Use 2731 instead of 2731.5 to avoid unnecessary rounding */
+ return sysfs_emit(buf, "%d\n", value - 2731);
}
static ssize_t eppid_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -852,7 +858,7 @@ static int dell_wmi_ddv_resume(struct device *dev)
{
struct dell_wmi_ddv_data *data = dev_get_drvdata(dev);
- /* Force re-reading of all sensors */
+ /* Force re-reading of all active sensors */
dell_wmi_ddv_hwmon_cache_invalidate(&data->fans);
dell_wmi_ddv_hwmon_cache_invalidate(&data->temps);
diff --git a/drivers/platform/x86/intel/int3472/tps68470_board_data.c b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
index 309eab9c0558..322237e056f3 100644
--- a/drivers/platform/x86/intel/int3472/tps68470_board_data.c
+++ b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
@@ -159,9 +159,10 @@ static const struct int3472_tps68470_board_data surface_go_tps68470_board_data =
static const struct int3472_tps68470_board_data surface_go3_tps68470_board_data = {
.dev_name = "i2c-INT3472:01",
.tps68470_regulator_pdata = &surface_go_tps68470_pdata,
- .n_gpiod_lookups = 1,
+ .n_gpiod_lookups = 2,
.tps68470_gpio_lookup_tables = {
- &surface_go_int347a_gpios
+ &surface_go_int347a_gpios,
+ &surface_go_int347e_gpios,
},
};
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index a7e02b24a87a..0954a04623ed 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -47,7 +47,7 @@ struct isst_cmd_set_req_type {
static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
{0xD0, 0x00, 0x03},
- {0x7F, 0x00, 0x0B},
+ {0x7F, 0x00, 0x0C},
{0x7F, 0x10, 0x12},
{0x7F, 0x20, 0x23},
{0x94, 0x03, 0x03},
@@ -112,6 +112,7 @@ static void isst_delete_hash(void)
* isst_store_cmd() - Store command to a hash table
* @cmd: Mailbox command.
* @sub_cmd: Mailbox sub-command or MSR id.
+ * @cpu: Target CPU for the command
* @mbox_cmd_type: Mailbox or MSR command.
* @param: Mailbox parameter.
* @data: Mailbox request data or MSR data.
@@ -363,7 +364,7 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
/**
* isst_if_get_pci_dev() - Get the PCI device instance for a CPU
* @cpu: Logical CPU number.
- * @bus_number: The bus number assigned by the hardware.
+ * @bus_no: The bus number assigned by the hardware.
* @dev: The device number assigned by the hardware.
* @fn: The function number assigned by the hardware.
*
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.h b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
index fdecdae248d7..35ff506b402e 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
@@ -40,6 +40,7 @@
* @offset: Offset to the first valid member in command structure.
* This will be the offset of the start of the command
* after command count field
+ * @owner: Registered module owner
* @cmd_callback: Callback function to handle IOCTL. The callback has the
* command pointer with data for command. There is a pointer
* called write_only, which when set, will not copy the
diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c
index c60733261c89..c999732b0f1e 100644
--- a/drivers/platform/x86/intel/tpmi.c
+++ b/drivers/platform/x86/intel/tpmi.c
@@ -209,14 +209,14 @@ static int tpmi_create_device(struct intel_tpmi_info *tpmi_info,
if (!name)
return -EOPNOTSUPP;
- feature_vsec_dev = kzalloc(sizeof(*feature_vsec_dev), GFP_KERNEL);
- if (!feature_vsec_dev)
+ res = kcalloc(pfs->pfs_header.num_entries, sizeof(*res), GFP_KERNEL);
+ if (!res)
return -ENOMEM;
- res = kcalloc(pfs->pfs_header.num_entries, sizeof(*res), GFP_KERNEL);
- if (!res) {
+ feature_vsec_dev = kzalloc(sizeof(*feature_vsec_dev), GFP_KERNEL);
+ if (!feature_vsec_dev) {
ret = -ENOMEM;
- goto free_vsec;
+ goto free_res;
}
snprintf(feature_id_name, sizeof(feature_id_name), "tpmi-%s", name);
@@ -239,6 +239,8 @@ static int tpmi_create_device(struct intel_tpmi_info *tpmi_info,
/*
* intel_vsec_add_aux() is resource managed, no explicit
* delete is required on error or on module unload.
+ * feature_vsec_dev memory is also freed as part of device
+ * delete.
*/
ret = intel_vsec_add_aux(vsec_dev->pcidev, &vsec_dev->auxdev.dev,
feature_vsec_dev, feature_id_name);
@@ -249,8 +251,6 @@ static int tpmi_create_device(struct intel_tpmi_info *tpmi_info,
free_res:
kfree(res);
-free_vsec:
- kfree(feature_vsec_dev);
return ret;
}
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 7b6779cdb134..67367f010139 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -5980,7 +5980,7 @@ MODULE_DEVICE_TABLE(dmi, mlxplat_dmi_table);
static int mlxplat_mlxcpld_verify_bus_topology(int *nr)
{
struct i2c_adapter *search_adap;
- int shift, i;
+ int i, shift = 0;
/* Scan adapters from expected id to verify it is free. */
*nr = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR;
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index 8e6f8a655079..05f413178462 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -724,6 +724,8 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
for (i = 0; i < AXP288_FG_INTR_NUM; i++) {
pirq = platform_get_irq(pdev, i);
+ if (pirq < 0)
+ continue;
ret = regmap_irq_get_virq(axp20x->regmap_irqc, pirq);
if (ret < 0)
return dev_err_probe(dev, ret, "getting vIRQ %d\n", pirq);
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index be34b9848450..de67b985f0a9 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -1906,6 +1906,7 @@ static void bq24190_remove(struct i2c_client *client)
struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
int error;
+ cancel_delayed_work_sync(&bdi->input_current_limit_work);
error = pm_runtime_resume_and_get(bdi->dev);
if (error < 0)
dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error);
diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c
index cadb6a0c2cc7..b6c96376776a 100644
--- a/drivers/power/supply/cros_usbpd-charger.c
+++ b/drivers/power/supply/cros_usbpd-charger.c
@@ -276,7 +276,7 @@ static int cros_usbpd_charger_get_power_info(struct port_data *port)
port->psy_current_max = 0;
break;
default:
- dev_err(dev, "Port %d: default case!\n", port->port_number);
+ dev_dbg(dev, "Port %d: default case!\n", port->port_number);
port->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
}
diff --git a/drivers/power/supply/da9150-charger.c b/drivers/power/supply/da9150-charger.c
index 14da5c595dd9..a87aeaea38e1 100644
--- a/drivers/power/supply/da9150-charger.c
+++ b/drivers/power/supply/da9150-charger.c
@@ -657,6 +657,7 @@ static int da9150_charger_remove(struct platform_device *pdev)
if (!IS_ERR_OR_NULL(charger->usb_phy))
usb_unregister_notifier(charger->usb_phy, &charger->otg_nb);
+ cancel_work_sync(&charger->otg_work);
power_supply_unregister(charger->battery);
power_supply_unregister(charger->usb);
diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c
index 4f9c1c417916..36f807b5ec44 100644
--- a/drivers/power/supply/rk817_charger.c
+++ b/drivers/power/supply/rk817_charger.c
@@ -785,8 +785,6 @@ rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger,
regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
bulk_reg, 4);
tmp = get_unaligned_be32(bulk_reg);
- if (tmp < 0)
- tmp = 0;
boot_charge_mah = ADC_TO_CHARGE_UAH(tmp,
charger->res_div) / 1000;
/*
@@ -825,8 +823,6 @@ rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger,
regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
bulk_reg, 4);
tmp = get_unaligned_be32(bulk_reg);
- if (tmp < 0)
- tmp = 0;
boot_charge_mah = ADC_TO_CHARGE_UAH(tmp, charger->res_div) / 1000;
regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_OCV_VOL_H,
bulk_reg, 2);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 362fa631f39b..a226dc1b65d7 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1145,10 +1145,12 @@ static int alua_activate(struct scsi_device *sdev,
rcu_read_unlock();
mutex_unlock(&h->init_mutex);
- if (alua_rtpg_queue(pg, sdev, qdata, true))
+ if (alua_rtpg_queue(pg, sdev, qdata, true)) {
fn = NULL;
- else
+ } else {
+ kfree(qdata);
err = SCSI_DH_DEV_OFFLINED;
+ }
kref_put(&pg->kref, release_port_group);
out:
if (fn)
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 0c3fcb807806..a63279f55d09 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2495,8 +2495,7 @@ static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;
shost->nr_hw_queues = hisi_hba->cq_nvecs;
- devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev);
- return 0;
+ return devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev);
}
static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index f7f62e56afca..9b6fbbe15d92 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -341,9 +341,6 @@ static void scsi_host_dev_release(struct device *dev)
struct Scsi_Host *shost = dev_to_shost(dev);
struct device *parent = dev->parent;
- /* In case scsi_remove_host() has not been called. */
- scsi_proc_hostdir_rm(shost->hostt);
-
/* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */
rcu_barrier();
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 61958a24a43d..4f7485958c49 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -7291,6 +7291,8 @@ lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
/* Find out if the FW has a new set of congestion parameters. */
len = sizeof(struct lpfc_cgn_param);
pdata = kzalloc(len, GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
pdata, len);
@@ -12563,7 +12565,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
goto found_same;
new_cpu = cpumask_next(
new_cpu, cpu_present_mask);
- if (new_cpu == nr_cpumask_bits)
+ if (new_cpu >= nr_cpu_ids)
new_cpu = first_cpu;
}
/* At this point, we leave the CPU as unassigned */
@@ -12577,7 +12579,7 @@ found_same:
* selecting the same IRQ.
*/
start_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (start_cpu == nr_cpumask_bits)
+ if (start_cpu >= nr_cpu_ids)
start_cpu = first_cpu;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -12613,7 +12615,7 @@ found_same:
goto found_any;
new_cpu = cpumask_next(
new_cpu, cpu_present_mask);
- if (new_cpu == nr_cpumask_bits)
+ if (new_cpu >= nr_cpu_ids)
new_cpu = first_cpu;
}
/* We should never leave an entry unassigned */
@@ -12631,7 +12633,7 @@ found_any:
* selecting the same IRQ.
*/
start_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (start_cpu == nr_cpumask_bits)
+ if (start_cpu >= nr_cpu_ids)
start_cpu = first_cpu;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -12704,7 +12706,7 @@ found_any:
goto found_hdwq;
}
new_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (new_cpu == nr_cpumask_bits)
+ if (new_cpu >= nr_cpu_ids)
new_cpu = first_cpu;
}
@@ -12719,7 +12721,7 @@ found_any:
goto found_hdwq;
new_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (new_cpu == nr_cpumask_bits)
+ if (new_cpu >= nr_cpu_ids)
new_cpu = first_cpu;
}
@@ -12730,7 +12732,7 @@ found_any:
found_hdwq:
/* We found an available entry, copy the IRQ info */
start_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (start_cpu == nr_cpumask_bits)
+ if (start_cpu >= nr_cpu_ids)
start_cpu = first_cpu;
cpup->hdwq = new_cpup->hdwq;
logit:
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index c5b69f313af3..cf630aa6734e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -21899,20 +21899,20 @@ lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
static struct lpfc_io_buf *
lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
{
- struct lpfc_io_buf *lpfc_ncmd;
+ struct lpfc_io_buf *lpfc_ncmd = NULL, *iter;
struct lpfc_io_buf *lpfc_ncmd_next;
unsigned long iflag;
struct lpfc_epd_pool *epd_pool;
epd_pool = &phba->epd_pool;
- lpfc_ncmd = NULL;
spin_lock_irqsave(&epd_pool->lock, iflag);
if (epd_pool->count > 0) {
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ list_for_each_entry_safe(iter, lpfc_ncmd_next,
&epd_pool->list, list) {
- list_del(&lpfc_ncmd->list);
+ list_del(&iter->list);
epd_pool->count--;
+ lpfc_ncmd = iter;
break;
}
}
@@ -22109,10 +22109,6 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
struct lpfc_dmabuf *pcmd;
u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
- /* sanity check on queue memory */
- if (!datap)
- return -ENODEV;
-
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 4919ea54b827..63bac3684c19 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -23,8 +23,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.719.03.00-rc1"
-#define MEGASAS_RELDATE "Sep 29, 2021"
+#define MEGASAS_VERSION "07.725.01.00-rc1"
+#define MEGASAS_RELDATE "Mar 2, 2023"
#define MEGASAS_MSIX_NAME_LEN 32
@@ -1519,6 +1519,8 @@ struct megasas_ctrl_info {
#define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \
MEGASAS_MAX_DEV_PER_CHANNEL)
+#define MEGASAS_MAX_SUPPORTED_LD_IDS 240
+
#define MEGASAS_MAX_SECTORS (2*1024)
#define MEGASAS_MAX_SECTORS_IEEE (2*128)
#define MEGASAS_DBG_LVL 1
@@ -1758,7 +1760,8 @@ union megasas_sgl_frame {
typedef union _MFI_CAPABILITIES {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:16;
+ u32 reserved:15;
+ u32 support_memdump:1;
u32 support_fw_exposed_dev_list:1;
u32 support_nvme_passthru:1;
u32 support_64bit_mode:1;
@@ -1792,7 +1795,8 @@ typedef union _MFI_CAPABILITIES {
u32 support_64bit_mode:1;
u32 support_nvme_passthru:1;
u32 support_fw_exposed_dev_list:1;
- u32 reserved:16;
+ u32 support_memdump:1;
+ u32 reserved:15;
#endif
} mfi_capabilities;
__le32 reg;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index da1cad1ee123..4463a538102a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -358,7 +358,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
ld = MR_TargetIdToLdGet(i, drv_map);
/* For non existing VDs, iterate to next VD*/
- if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
+ if (ld >= MEGASAS_MAX_SUPPORTED_LD_IDS)
continue;
raid = MR_LdRaidGet(ld, drv_map);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 6597e118c805..84c9a55a5794 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1201,6 +1201,9 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
drv_ops->mfi_capabilities.support_nvme_passthru = 1;
drv_ops->mfi_capabilities.support_fw_exposed_dev_list = 1;
+ if (reset_devices)
+ drv_ops->mfi_capabilities.support_memdump = 1;
+
if (instance->consistent_mask_64bit)
drv_ops->mfi_capabilities.support_64bit_mode = 1;
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 23de2603e71f..364fb1b5e45a 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -902,6 +902,7 @@ struct scmd_priv {
* @admin_reply_ephase:Admin reply queue expected phase
* @admin_reply_base: Admin reply queue base virtual address
* @admin_reply_dma: Admin reply queue base dma address
+ * @admin_reply_q_in_use: Queue is handled by poll/ISR
* @ready_timeout: Controller ready timeout
* @intr_info: Interrupt cookie pointer
* @intr_info_count: Number of interrupt cookies
@@ -1055,6 +1056,7 @@ struct mpi3mr_ioc {
u8 admin_reply_ephase;
void *admin_reply_base;
dma_addr_t admin_reply_dma;
+ atomic_t admin_reply_q_in_use;
u32 ready_timeout;
@@ -1390,4 +1392,7 @@ void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc);
void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc);
void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc);
void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc);
+int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc);
+void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *sas_expander);
#endif /*MPI3MR_H_INCLUDED*/
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index bff637702397..d10c6afb7f9c 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -886,7 +886,7 @@ static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
* each time through the loop.
*/
*prp_entry = cpu_to_le64(dma_addr);
- if (*prp1_entry & sgemod_mask) {
+ if (*prp_entry & sgemod_mask) {
dprint_bsg_err(mrioc,
"%s: PRP address collides with SGE modifier\n",
__func__);
@@ -895,7 +895,7 @@ static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
*prp_entry &= ~sgemod_mask;
*prp_entry |= sgemod_val;
prp_entry++;
- prp_entry_dma++;
+ prp_entry_dma += prp_size;
}
/*
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 758f7ca9e0ee..a565817aa56d 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -415,7 +415,7 @@ out:
le64_to_cpu(scsi_reply->sense_data_buffer_address));
}
-static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
+int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
{
u32 exp_phase = mrioc->admin_reply_ephase;
u32 admin_reply_ci = mrioc->admin_reply_ci;
@@ -423,12 +423,17 @@ static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
u64 reply_dma = 0;
struct mpi3_default_reply_descriptor *reply_desc;
+ if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1))
+ return 0;
+
reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
admin_reply_ci;
if ((le16_to_cpu(reply_desc->reply_flags) &
- MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
+ MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
+ atomic_dec(&mrioc->admin_reply_q_in_use);
return 0;
+ }
do {
if (mrioc->unrecoverable)
@@ -454,6 +459,7 @@ static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
mrioc->admin_reply_ci = admin_reply_ci;
mrioc->admin_reply_ephase = exp_phase;
+ atomic_dec(&mrioc->admin_reply_q_in_use);
return num_admin_replies;
}
@@ -1192,7 +1198,7 @@ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
*/
static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
{
- u32 ioc_config, ioc_status, timeout;
+ u32 ioc_config, ioc_status, timeout, host_diagnostic;
int retval = 0;
enum mpi3mr_iocstate ioc_state;
u64 base_info;
@@ -1246,6 +1252,23 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
retval, mpi3mr_iocstate_name(ioc_state));
}
if (ioc_state != MRIOC_STATE_RESET) {
+ if (ioc_state == MRIOC_STATE_FAULT) {
+ timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
+ mpi3mr_print_fault_info(mrioc);
+ do {
+ host_diagnostic =
+ readl(&mrioc->sysif_regs->host_diagnostic);
+ if (!(host_diagnostic &
+ MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
+ break;
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ ioc_err(mrioc, "controller is not present at the bringup\n");
+ goto out_device_not_present;
+ }
+ msleep(100);
+ } while (--timeout);
+ }
mpi3mr_print_fault_info(mrioc);
ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
retval = mpi3mr_issue_reset(mrioc,
@@ -2605,6 +2628,7 @@ static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
mrioc->admin_reply_ci = 0;
mrioc->admin_reply_ephase = 1;
mrioc->admin_reply_base = NULL;
+ atomic_set(&mrioc->admin_reply_q_in_use, 0);
if (!mrioc->admin_req_base) {
mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
@@ -3813,27 +3837,34 @@ retry_init:
mpi3mr_print_ioc_info(mrioc);
- dprint_init(mrioc, "allocating config page buffers\n");
- mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
- MPI3MR_DEFAULT_CFG_PAGE_SZ, &mrioc->cfg_page_dma, GFP_KERNEL);
- if (!mrioc->cfg_page)
- goto out_failed_noretry;
-
- mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
+ if (!mrioc->cfg_page) {
+ dprint_init(mrioc, "allocating config page buffers\n");
+ mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
+ mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
+ mrioc->cfg_page_sz, &mrioc->cfg_page_dma, GFP_KERNEL);
+ if (!mrioc->cfg_page) {
+ retval = -1;
+ goto out_failed_noretry;
+ }
+ }
- retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
- if (retval) {
- ioc_err(mrioc,
- "%s :Failed to allocated reply sense buffers %d\n",
- __func__, retval);
- goto out_failed_noretry;
+ if (!mrioc->init_cmds.reply) {
+ retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
+ if (retval) {
+ ioc_err(mrioc,
+ "%s :Failed to allocated reply sense buffers %d\n",
+ __func__, retval);
+ goto out_failed_noretry;
+ }
}
- retval = mpi3mr_alloc_chain_bufs(mrioc);
- if (retval) {
- ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
- retval);
- goto out_failed_noretry;
+ if (!mrioc->chain_sgl_list) {
+ retval = mpi3mr_alloc_chain_bufs(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
+ retval);
+ goto out_failed_noretry;
+ }
}
retval = mpi3mr_issue_iocinit(mrioc);
@@ -3879,8 +3910,10 @@ retry_init:
dprint_init(mrioc, "allocating memory for throttle groups\n");
sz = sizeof(struct mpi3mr_throttle_group_info);
mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
- if (!mrioc->throttle_groups)
+ if (!mrioc->throttle_groups) {
+ retval = -1;
goto out_failed_noretry;
+ }
}
retval = mpi3mr_enable_events(mrioc);
@@ -3900,6 +3933,7 @@ out_failed:
mpi3mr_memset_buffers(mrioc);
goto retry_init;
}
+ retval = -1;
out_failed_noretry:
ioc_err(mrioc, "controller initialization failed\n");
mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
@@ -4012,6 +4046,7 @@ retry_init:
ioc_err(mrioc,
"cannot create minimum number of operational queues expected:%d created:%d\n",
mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
+ retval = -1;
goto out_failed_noretry;
}
@@ -4078,6 +4113,7 @@ out_failed:
mpi3mr_memset_buffers(mrioc);
goto retry_init;
}
+ retval = -1;
out_failed_noretry:
ioc_err(mrioc, "controller %s is failed\n",
(is_resume)?"resume":"re-initialization");
@@ -4155,6 +4191,7 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
if (mrioc->admin_reply_base)
memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
+ atomic_set(&mrioc->admin_reply_q_in_use, 0);
if (mrioc->init_cmds.reply) {
memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
@@ -4350,13 +4387,20 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
mrioc->admin_req_base, mrioc->admin_req_dma);
mrioc->admin_req_base = NULL;
}
-
+ if (mrioc->cfg_page) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->cfg_page_sz,
+ mrioc->cfg_page, mrioc->cfg_page_dma);
+ mrioc->cfg_page = NULL;
+ }
if (mrioc->pel_seqnum_virt) {
dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
mrioc->pel_seqnum_virt = NULL;
}
+ kfree(mrioc->throttle_groups);
+ mrioc->throttle_groups = NULL;
+
kfree(mrioc->logdata_buf);
mrioc->logdata_buf = NULL;
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 6eaeba41072c..6d55698ea4d1 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -3720,6 +3720,7 @@ int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
mpi3mr_poll_pend_io_completions(mrioc);
mpi3mr_ioc_enable_intr(mrioc);
mpi3mr_poll_pend_io_completions(mrioc);
+ mpi3mr_process_admin_reply_q(mrioc);
}
switch (tm_type) {
case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
@@ -5077,6 +5078,8 @@ static void mpi3mr_remove(struct pci_dev *pdev)
struct workqueue_struct *wq;
unsigned long flags;
struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
+ struct mpi3mr_hba_port *port, *hba_port_next;
+ struct mpi3mr_sas_node *sas_expander, *sas_expander_next;
if (!shost)
return;
@@ -5116,6 +5119,28 @@ static void mpi3mr_remove(struct pci_dev *pdev)
mpi3mr_free_mem(mrioc);
mpi3mr_cleanup_resources(mrioc);
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_for_each_entry_safe_reverse(sas_expander, sas_expander_next,
+ &mrioc->sas_expander_list, list) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ mpi3mr_expander_node_remove(mrioc, sas_expander);
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ }
+ list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) {
+ ioc_info(mrioc,
+ "removing hba_port entry: %p port: %d from hba_port list\n",
+ port, port->port_id);
+ list_del(&port->list);
+ kfree(port);
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (mrioc->sas_hba.num_phys) {
+ kfree(mrioc->sas_hba.phy);
+ mrioc->sas_hba.phy = NULL;
+ mrioc->sas_hba.num_phys = 0;
+ }
+
spin_lock(&mrioc_list_lock);
list_del(&mrioc->list);
spin_unlock(&mrioc_list_lock);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index 3b61815979da..5748bd9369ff 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -9,9 +9,6 @@
#include "mpi3mr.h"
-static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
- struct mpi3mr_sas_node *sas_expander);
-
/**
* mpi3mr_post_transport_req - Issue transport requests and wait
* @mrioc: Adapter instance reference
@@ -1552,7 +1549,8 @@ static void mpi3mr_sas_port_remove(struct mpi3mr_ioc *mrioc, u64 sas_address,
list_for_each_entry_safe(mr_sas_phy, next_phy,
&mr_sas_port->phy_list, port_siblings) {
- if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ if ((!mrioc->stop_drv_processing) &&
+ (mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
dev_info(&mr_sas_port->port->dev,
"remove: sas_address(0x%016llx), phy(%d)\n",
(unsigned long long)
@@ -2163,7 +2161,7 @@ out_fail:
*
* Return nothing.
*/
-static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
struct mpi3mr_sas_node *sas_expander)
{
struct mpi3mr_sas_port *mr_sas_port, *next;
@@ -2357,15 +2355,16 @@ int mpi3mr_report_tgtdev_to_sas_transport(struct mpi3mr_ioc *mrioc,
tgtdev->host_exposed = 1;
if (!mpi3mr_sas_port_add(mrioc, tgtdev->dev_handle,
sas_address_parent, hba_port)) {
- tgtdev->host_exposed = 0;
retval = -1;
- } else if ((!tgtdev->starget)) {
- if (!mrioc->is_driver_loading)
+ } else if ((!tgtdev->starget) && (!mrioc->is_driver_loading)) {
mpi3mr_sas_port_remove(mrioc, sas_address,
sas_address_parent, hba_port);
- tgtdev->host_exposed = 0;
retval = -1;
}
+ if (retval) {
+ tgtdev->dev_spec.sas_sata_inf.hba_port = NULL;
+ tgtdev->host_exposed = 0;
+ }
return retval;
}
@@ -2394,6 +2393,7 @@ void mpi3mr_remove_tgtdev_from_sas_transport(struct mpi3mr_ioc *mrioc,
mpi3mr_sas_port_remove(mrioc, sas_address, sas_address_parent,
hba_port);
tgtdev->host_exposed = 0;
+ tgtdev->dev_spec.sas_sata_inf.hba_port = NULL;
}
/**
@@ -2450,7 +2450,7 @@ static u8 mpi3mr_get_port_id_by_rphy(struct mpi3mr_ioc *mrioc, struct sas_rphy *
tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
rphy->identify.sas_address, rphy);
- if (tgtdev) {
+ if (tgtdev && tgtdev->dev_spec.sas_sata_inf.hba_port) {
port_id =
tgtdev->dev_spec.sas_sata_inf.hba_port->port_id;
mpi3mr_tgtdev_put(tgtdev);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index e5ecd6ada6cd..e8a4750f6ec4 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -785,7 +785,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
goto out_fail;
}
port = sas_port_alloc_num(sas_node->parent_dev);
- if ((sas_port_add(port))) {
+ if (!port || (sas_port_add(port))) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
goto out_fail;
@@ -824,6 +824,12 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
mpt3sas_port->remote_identify.sas_address;
}
+ if (!rphy) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_delete_port;
+ }
+
rphy->identify = mpt3sas_port->remote_identify;
if ((sas_rphy_add(rphy))) {
@@ -831,6 +837,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
__FILE__, __LINE__, __func__);
sas_rphy_free(rphy);
rphy = NULL;
+ goto out_delete_port;
}
if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
@@ -857,7 +864,10 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
rphy_to_expander_device(rphy), hba_port->port_id);
return mpt3sas_port;
- out_fail:
+out_delete_port:
+ sas_port_delete(port);
+
+out_fail:
list_for_each_entry_safe(mpt3sas_phy, next, &mpt3sas_port->phy_list,
port_siblings)
list_del(&mpt3sas_phy->port_siblings);
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 9142df876c73..9aba07c7bde4 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -192,6 +192,7 @@ extern int ql2xsecenable;
extern int ql2xenforce_iocb_limit;
extern int ql2xabts_wait_nvme;
extern u32 ql2xnvme_queues;
+extern int ql2xfc2target;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1dbc1496ebed..ec0423ec6681 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1840,7 +1840,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
case RSCN_PORT_ADDR:
fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
if (fcport) {
- if (fcport->flags & FCF_FCP2_DEVICE &&
+ if (ql2xfc2target &&
+ fcport->flags & FCF_FCP2_DEVICE &&
atomic_read(&fcport->state) == FCS_ONLINE) {
ql_dbg(ql_dbg_disc, vha, 0x2115,
"Delaying session delete for FCP2 portid=%06x %8phC ",
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 030625ebb4e6..71feda2cdb63 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1900,6 +1900,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
}
req->outstanding_cmds[index] = NULL;
+
+ qla_put_fw_resources(sp->qpair, &sp->iores);
return sp;
}
@@ -3112,7 +3114,6 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
}
bsg_reply->reply_payload_rcv_len = 0;
- qla_put_fw_resources(sp->qpair, &sp->iores);
done:
/* Return the vendor specific reply to API */
bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 545167627e48..bee1b8a82020 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -360,6 +360,13 @@ MODULE_PARM_DESC(ql2xnvme_queues,
"1 - Minimum number of queues supported\n"
"8 - Default value");
+int ql2xfc2target = 1;
+module_param(ql2xfc2target, int, 0444);
+MODULE_PARM_DESC(ql2xfc2target,
+ "Enables FC2 Target support. "
+ "0 - FC2 Target support is disabled. "
+ "1 - FC2 Target support is enabled (default).");
+
static struct scsi_transport_template *qla2xxx_transport_template = NULL;
struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
@@ -1858,6 +1865,17 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
+ /*
+ * perform lockless completion during driver unload
+ */
+ if (qla2x00_chip_is_down(vha)) {
+ req->outstanding_cmds[cnt] = NULL;
+ spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
+ sp->done(sp, res);
+ spin_lock_irqsave(qp->qp_lock_ptr, flags);
+ continue;
+ }
+
switch (sp->cmd_type) {
case TYPE_SRB:
qla2x00_abort_srb(qp, sp, res, &flags);
@@ -4085,7 +4103,8 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
"Mark all dev lost\n");
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->loop_id != FC_NO_LOOP_ID &&
+ if (ql2xfc2target &&
+ fcport->loop_id != FC_NO_LOOP_ID &&
(fcport->flags & FCF_FCP2_DEVICE) &&
fcport->port_type == FCT_TARGET &&
!qla2x00_reset_active(vha)) {
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 7d2210a006f0..5cce1ba70fc6 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -326,6 +326,9 @@ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
int result;
+ if (sdev->no_vpd_size)
+ return SCSI_DEFAULT_VPD_LEN;
+
/*
* Fetch the VPD page header to find out how big the page
* is. This is done to prevent problems on legacy devices
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index c7080454aea9..3fcaf10a9dfe 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -134,7 +134,7 @@ static struct {
{"3PARdata", "VV", NULL, BLIST_REPORTLUN2},
{"ADAPTEC", "AACRAID", NULL, BLIST_FORCELUN},
{"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN},
- {"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES},
+ {"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES | BLIST_NO_VPD_SIZE},
{"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN},
{"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36},
{"BROWNIE", "1200U3P", NULL, BLIST_NOREPORTLUN},
@@ -188,6 +188,7 @@ static struct {
{"HPE", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES},
{"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN},
{"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"IBM", "2076", NULL, BLIST_NO_VPD_SIZE},
{"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
{"iomega", "jaz 1GB", "J.86", BLIST_NOTQ | BLIST_NOLUN},
{"IOMEGA", "ZIP", NULL, BLIST_NOTQ | BLIST_NOLUN},
@@ -233,6 +234,7 @@ static struct {
{"SGI", "RAID5", "*", BLIST_SPARSELUN},
{"SGI", "TP9100", "*", BLIST_REPORTLUN2},
{"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"SKhynix", "H28U74301AMR", NULL, BLIST_SKIP_VPD_PAGES},
{"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 4e842d79de31..d217be323cc6 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1057,6 +1057,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
else if (*bflags & BLIST_SKIP_VPD_PAGES)
sdev->skip_vpd_pages = 1;
+ if (*bflags & BLIST_NO_VPD_SIZE)
+ sdev->no_vpd_size = 1;
+
transport_configure_device(&sdev->sdev_gendev);
if (sdev->host->hostt->slave_configure) {
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 4f28dd617eca..4bb87043e6db 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2988,8 +2988,13 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
}
if (sdkp->device->type == TYPE_ZBC) {
- /* Host-managed */
+ /*
+ * Host-managed: Per ZBC and ZAC specifications, writes in
+ * sequential write required zones of host-managed devices must
+ * be aligned to the device physical block size.
+ */
disk_set_zoned(sdkp->disk, BLK_ZONED_HM);
+ blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
} else {
sdkp->zoned = zoned;
if (sdkp->zoned == 1) {
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 6b3a02d4406c..22801c24ea19 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -965,14 +965,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
disk_set_max_active_zones(disk, 0);
nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
- /*
- * Per ZBC and ZAC specifications, writes in sequential write required
- * zones of host-managed devices must be aligned to the device physical
- * block size.
- */
- if (blk_queue_zoned_model(q) == BLK_ZONED_HM)
- blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
-
sdkp->early_zone_info.nr_zones = nr_zones;
sdkp->early_zone_info.zone_blocks = zone_blocks;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 33f568b7f54d..d9ce379c4d2e 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -988,6 +988,22 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
}
/*
+ * Check for "Operating parameters have changed"
+ * due to Hyper-V changing the VHD/VHDX BlockSize
+ * when adding/removing a differencing disk. This
+ * causes discard_granularity to change, so do a
+ * rescan to pick up the new granularity. We don't
+ * want scsi_report_sense() to output a message
+ * that a sysadmin wouldn't know what to do with.
+ */
+ if ((asc == 0x3f) && (ascq != 0x03) &&
+ (ascq != 0x0e)) {
+ process_err_fn = storvsc_device_scan;
+ set_host_byte(scmnd, DID_REQUEUE);
+ goto do_work;
+ }
+
+ /*
* Otherwise, let upper layer deal with the
* error when sense message is present
*/
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 23ce2f78c4ed..26efe12012a0 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -191,9 +191,9 @@ static const struct llcc_slice_config sc8280xp_data[] = {
{ LLCC_CVP, 28, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
{ LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0, 0 },
{ LLCC_WRCACHE, 31, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
- { LLCC_CVPFW, 32, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_CPUSS1, 33, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
- { LLCC_CPUHWT, 36, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_CVPFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_CPUSS1, 3, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_CPUHWT, 5, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
};
static const struct llcc_slice_config sdm845_data[] = {
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
index 2d3ee22b9249..538fa182169a 100644
--- a/drivers/soc/qcom/rmtfs_mem.c
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -176,7 +176,8 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
struct reserved_mem *rmem;
struct qcom_rmtfs_mem *rmtfs_mem;
u32 client_id;
- u32 num_vmids, vmid[NUM_MAX_VMIDS];
+ u32 vmid[NUM_MAX_VMIDS];
+ int num_vmids;
int ret, i;
rmem = of_reserved_mem_lookup(node);
@@ -228,8 +229,11 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
}
num_vmids = of_property_count_u32_elems(node, "qcom,vmid");
- if (num_vmids < 0) {
- dev_err(&pdev->dev, "failed to count qcom,vmid elements: %d\n", ret);
+ if (num_vmids == -EINVAL) {
+ /* qcom,vmid is optional */
+ num_vmids = 0;
+ } else if (num_vmids < 0) {
+ dev_err(&pdev->dev, "failed to count qcom,vmid elements: %d\n", num_vmids);
goto remove_cdev;
} else if (num_vmids > NUM_MAX_VMIDS) {
dev_warn(&pdev->dev,
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 5cfabd5376cc..f9aef39cac2e 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -36,8 +36,6 @@ source "drivers/staging/rtl8723bs/Kconfig"
source "drivers/staging/rtl8712/Kconfig"
-source "drivers/staging/r8188eu/Kconfig"
-
source "drivers/staging/rts5208/Kconfig"
source "drivers/staging/octeon/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index f8c3aa9c2418..ffa70dda481d 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_RTL8192U) += rtl8192u/
obj-$(CONFIG_RTL8192E) += rtl8192e/
obj-$(CONFIG_RTL8723BS) += rtl8723bs/
obj-$(CONFIG_R8712U) += rtl8712/
-obj-$(CONFIG_R8188EU) += r8188eu/
obj-$(CONFIG_RTS5208) += rts5208/
obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
obj-$(CONFIG_VT6655) += vt6655/
diff --git a/drivers/staging/r8188eu/Kconfig b/drivers/staging/r8188eu/Kconfig
deleted file mode 100644
index f5fe423530f0..000000000000
--- a/drivers/staging/r8188eu/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config R8188EU
- tristate "Realtek RTL8188EU Wireless LAN NIC driver"
- depends on WLAN && USB && CFG80211
- depends on m
- select WIRELESS_EXT
- select WEXT_PRIV
- select LIB80211
- select LIB80211_CRYPT_WEP
- select LIB80211_CRYPT_CCMP
- help
- This option adds support for the Realtek RTL8188EU chipset, used in USB
- devices such as the ASUS USB-N10 Nano. This newer driver is based on GitHub
- sources for version v4.1.4_6773.20130222, and contains modifications for
- newer kernel features. If built as a module, it will be called r8188eu.
-
diff --git a/drivers/staging/r8188eu/Makefile b/drivers/staging/r8188eu/Makefile
deleted file mode 100644
index fd494c2299e6..000000000000
--- a/drivers/staging/r8188eu/Makefile
+++ /dev/null
@@ -1,48 +0,0 @@
-
-r8188eu-y = \
- hal/HalHWImg8188E_MAC.o \
- hal/HalHWImg8188E_BB.o \
- hal/HalHWImg8188E_RF.o \
- hal/HalPhyRf_8188e.o \
- hal/HalPwrSeqCmd.o \
- hal/Hal8188ERateAdaptive.o \
- hal/hal_intf.o \
- hal/hal_com.o \
- hal/odm.o \
- hal/odm_HWConfig.o \
- hal/odm_RTL8188E.o \
- hal/rtl8188e_cmd.o \
- hal/rtl8188e_dm.o \
- hal/rtl8188e_hal_init.o \
- hal/rtl8188e_phycfg.o \
- hal/rtl8188e_rf6052.o \
- hal/rtl8188e_rxdesc.o \
- hal/rtl8188eu_xmit.o \
- hal/usb_halinit.o \
- hal/usb_ops_linux.o \
- os_dep/ioctl_linux.o \
- os_dep/os_intfs.o \
- os_dep/osdep_service.o \
- os_dep/usb_intf.o \
- os_dep/usb_ops_linux.o \
- core/rtw_ap.o \
- core/rtw_br_ext.o \
- core/rtw_cmd.o \
- core/rtw_efuse.o \
- core/rtw_fw.o \
- core/rtw_ieee80211.o \
- core/rtw_ioctl_set.o \
- core/rtw_iol.o \
- core/rtw_led.o \
- core/rtw_mlme.o \
- core/rtw_mlme_ext.o \
- core/rtw_pwrctrl.o \
- core/rtw_p2p.o \
- core/rtw_recv.o \
- core/rtw_rf.o \
- core/rtw_security.o \
- core/rtw_sta_mgt.o \
- core/rtw_wlan_util.o \
- core/rtw_xmit.o
-
-obj-$(CONFIG_R8188EU) := r8188eu.o
diff --git a/drivers/staging/r8188eu/TODO b/drivers/staging/r8188eu/TODO
deleted file mode 100644
index ab9d5d145b3b..000000000000
--- a/drivers/staging/r8188eu/TODO
+++ /dev/null
@@ -1,16 +0,0 @@
-To-do list:
-
-* Correct the coding style according to Linux guidelines; please read the document
- at https://www.kernel.org/doc/html/latest/process/coding-style.html.
-* Remove unnecessary debugging/printing macros; for those that are still needed
- use the proper kernel API (pr_debug(), dev_dbg(), netdev_dbg()).
-* Remove dead code such as unusued functions, variables, fields, etc..
-* Use in-kernel API and remove unnecessary wrappers where possible.
-* Fix bugs due to code that sleeps in atomic context.
-* Remove the HAL layer and migrate its functionality into the relevant parts of
- the driver.
-* Switch to use LIB80211.
-* Switch to use MAC80211.
-* Switch to use CFG80211.
-* Improve the error handling of various functions, particularly those that use
- existing kernel APIs.
diff --git a/drivers/staging/r8188eu/core/rtw_ap.c b/drivers/staging/r8188eu/core/rtw_ap.c
deleted file mode 100644
index e0ca4b6e17cc..000000000000
--- a/drivers/staging/r8188eu/core/rtw_ap.c
+++ /dev/null
@@ -1,1181 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#define _RTW_AP_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/wifi.h"
-#include "../include/ieee80211.h"
-#include "../include/rtl8188e_cmd.h"
-
-void init_mlme_ap_info(struct adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
-
- spin_lock_init(&pmlmepriv->bcn_update_lock);
-
- /* for ACL */
- rtw_init_queue(&pacl_list->acl_node_q);
-
- start_ap_mode(padapter);
-}
-
-void free_mlme_ap_info(struct adapter *padapter)
-{
- struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- pmlmepriv->update_bcn = false;
- pmlmeext->bstart_bss = false;
-
- rtw_sta_flush(padapter);
-
- pmlmeinfo->state = _HW_STATE_NOLINK_;
-
- /* free_assoc_sta_resources */
- rtw_free_all_stainfo(padapter);
-
- /* free bc/mc sta_info */
- psta = rtw_get_bcmc_stainfo(padapter);
- spin_lock_bh(&pstapriv->sta_hash_lock);
- rtw_free_stainfo(padapter, psta);
- spin_unlock_bh(&pstapriv->sta_hash_lock);
-}
-
-static void update_BCNTIM(struct adapter *padapter)
-{
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork_mlmeext = &pmlmeinfo->network;
- unsigned char *pie = pnetwork_mlmeext->IEs;
- u8 *p, *dst_ie, *premainder_ie = NULL;
- u8 *pbackup_remainder_ie = NULL;
- __le16 tim_bitmap_le;
- uint offset, tmp_len, tim_ielen, tim_ie_offset, remainder_ielen;
-
- /* update TIM IE */
-
- p = rtw_get_ie(pie + _FIXED_IE_LENGTH_, _TIM_IE_, &tim_ielen,
- pnetwork_mlmeext->IELength - _FIXED_IE_LENGTH_);
- if (p && tim_ielen > 0) {
- tim_ielen += 2;
- premainder_ie = p + tim_ielen;
- tim_ie_offset = (int)(p - pie);
- remainder_ielen = pnetwork_mlmeext->IELength - tim_ie_offset - tim_ielen;
- /* append TIM IE from dst_ie offset */
- dst_ie = p;
- } else {
- tim_ielen = 0;
-
- /* calculate head_len */
- offset = _FIXED_IE_LENGTH_;
- offset += pnetwork_mlmeext->Ssid.SsidLength + 2;
-
- /* get supported rates len */
- p = rtw_get_ie(pie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_,
- &tmp_len, (pnetwork_mlmeext->IELength - _BEACON_IE_OFFSET_));
- if (p)
- offset += tmp_len + 2;
-
- /* DS Parameter Set IE, len = 3 */
- offset += 3;
-
- premainder_ie = pie + offset;
-
- remainder_ielen = pnetwork_mlmeext->IELength - offset - tim_ielen;
-
- /* append TIM IE from offset */
- dst_ie = pie + offset;
- }
-
- if (remainder_ielen > 0) {
- pbackup_remainder_ie = kmalloc(remainder_ielen, GFP_ATOMIC);
- if (pbackup_remainder_ie && premainder_ie)
- memcpy(pbackup_remainder_ie, premainder_ie, remainder_ielen);
- }
- *dst_ie++ = _TIM_IE_;
-
- if ((pstapriv->tim_bitmap & 0xff00) && (pstapriv->tim_bitmap & 0x00fc))
- tim_ielen = 5;
- else
- tim_ielen = 4;
-
- *dst_ie++ = tim_ielen;
-
- *dst_ie++ = 0;/* DTIM count */
- *dst_ie++ = 1;/* DTIM period */
-
- if (pstapriv->tim_bitmap & BIT(0))/* for bc/mc frames */
- *dst_ie++ = BIT(0);/* bitmap ctrl */
- else
- *dst_ie++ = 0;
-
- tim_bitmap_le = cpu_to_le16(pstapriv->tim_bitmap);
-
- if (tim_ielen == 4) {
- *dst_ie++ = *(u8 *)&tim_bitmap_le;
- } else if (tim_ielen == 5) {
- memcpy(dst_ie, &tim_bitmap_le, 2);
- dst_ie += 2;
- }
-
- /* copy remainder IE */
- if (pbackup_remainder_ie) {
- memcpy(dst_ie, pbackup_remainder_ie, remainder_ielen);
-
- kfree(pbackup_remainder_ie);
- }
- offset = (uint)(dst_ie - pie);
- pnetwork_mlmeext->IELength = offset + remainder_ielen;
-
- set_tx_beacon_cmd(padapter);
-}
-
-static u8 chk_sta_is_alive(struct sta_info *psta)
-{
- u8 ret = false;
-
- if ((psta->sta_stats.last_rx_data_pkts + psta->sta_stats.last_rx_ctrl_pkts) ==
- (psta->sta_stats.rx_data_pkts + psta->sta_stats.rx_ctrl_pkts))
- ;
- else
- ret = true;
-
- sta_update_last_rx_pkts(psta);
-
- return ret;
-}
-
-void expire_timeout_chk(struct adapter *padapter)
-{
- struct list_head *phead, *plist;
- u8 updated = 0;
- struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
- u8 chk_alive_num = 0;
- char chk_alive_list[NUM_STA];
- int i;
-
- spin_lock_bh(&pstapriv->auth_list_lock);
-
- phead = &pstapriv->auth_list;
- plist = phead->next;
-
- /* check auth_queue */
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, auth_list);
- plist = plist->next;
-
- if (psta->expire_to > 0) {
- psta->expire_to--;
- if (psta->expire_to == 0) {
- list_del_init(&psta->auth_list);
- pstapriv->auth_list_cnt--;
-
- spin_unlock_bh(&pstapriv->auth_list_lock);
-
- spin_lock_bh(&pstapriv->sta_hash_lock);
- rtw_free_stainfo(padapter, psta);
- spin_unlock_bh(&pstapriv->sta_hash_lock);
-
- spin_lock_bh(&pstapriv->auth_list_lock);
- }
- }
- }
- spin_unlock_bh(&pstapriv->auth_list_lock);
-
- psta = NULL;
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
-
- phead = &pstapriv->asoc_list;
- plist = phead->next;
-
- /* check asoc_queue */
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, asoc_list);
- plist = plist->next;
-
- if (chk_sta_is_alive(psta) || !psta->expire_to) {
- psta->expire_to = pstapriv->expire_to;
- psta->keep_alive_trycnt = 0;
- psta->under_exist_checking = 0;
- } else {
- psta->expire_to--;
- }
-
- if (psta->expire_to <= 0) {
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- if (padapter->registrypriv.wifi_spec == 1) {
- psta->expire_to = pstapriv->expire_to;
- continue;
- }
-
- if (psta->state & WIFI_SLEEP_STATE) {
- if (!(psta->state & WIFI_STA_ALIVE_CHK_STATE)) {
- /* to check if alive by another methods if station is at ps mode. */
- psta->expire_to = pstapriv->expire_to;
- psta->state |= WIFI_STA_ALIVE_CHK_STATE;
-
- /* to update bcn with tim_bitmap for this station */
- pstapriv->tim_bitmap |= BIT(psta->aid);
- update_beacon(padapter, _TIM_IE_, NULL, false);
-
- if (!pmlmeext->active_keep_alive_check)
- continue;
- }
- }
- if (pmlmeext->active_keep_alive_check) {
- int stainfo_offset;
-
- stainfo_offset = rtw_stainfo_offset(pstapriv, psta);
- if (stainfo_offset_valid(stainfo_offset))
- chk_alive_list[chk_alive_num++] = stainfo_offset;
- continue;
- }
-
- list_del_init(&psta->asoc_list);
- pstapriv->asoc_list_cnt--;
-
- updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
- } else {
- /* TODO: Aging mechanism to digest frames in sleep_q to avoid running out of xmitframe */
- if (psta->sleepq_len > (NR_XMITFRAME / pstapriv->asoc_list_cnt) &&
- padapter->xmitpriv.free_xmitframe_cnt < (NR_XMITFRAME / pstapriv->asoc_list_cnt / 2)) {
- wakeup_sta_to_xmit(padapter, psta);
- }
- }
- }
-
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-
- if (chk_alive_num) {
- u8 backup_oper_channel = 0;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- /* switch to correct channel of current network before issue keep-alive frames */
- if (rtw_get_oper_ch(padapter) != pmlmeext->cur_channel) {
- backup_oper_channel = rtw_get_oper_ch(padapter);
- SelectChannel(padapter, pmlmeext->cur_channel);
- }
-
- /* issue null data to check sta alive*/
- for (i = 0; i < chk_alive_num; i++) {
- int ret = _FAIL;
-
- psta = rtw_get_stainfo_by_offset(pstapriv, chk_alive_list[i]);
-
- if (psta->state & WIFI_SLEEP_STATE)
- ret = issue_nulldata(padapter, psta->hwaddr, 0, 1, 50);
- else
- ret = issue_nulldata(padapter, psta->hwaddr, 0, 3, 50);
-
- psta->keep_alive_trycnt++;
- if (ret == _SUCCESS) {
- psta->expire_to = pstapriv->expire_to;
- psta->keep_alive_trycnt = 0;
- continue;
- } else if (psta->keep_alive_trycnt <= 3) {
- psta->expire_to = 1;
- continue;
- }
-
- psta->keep_alive_trycnt = 0;
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- list_del_init(&psta->asoc_list);
- pstapriv->asoc_list_cnt--;
- updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
- spin_unlock_bh(&pstapriv->asoc_list_lock);
- }
-
- if (backup_oper_channel > 0) /* back to the original operation channel */
- SelectChannel(padapter, backup_oper_channel);
- }
-
- associated_clients_update(padapter, updated);
-}
-
-void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
-{
- int i;
- u32 init_rate = 0;
- unsigned char sta_band = 0, raid, shortGIrate = false;
- unsigned char limit;
- unsigned int tx_ra_bitmap = 0;
- struct ht_priv *psta_ht = NULL;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_bssid_ex *pcur_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
-
- if (psta)
- psta_ht = &psta->htpriv;
- else
- return;
-
- if (!(psta->state & _FW_LINKED))
- return;
-
- /* b/g mode ra_bitmap */
- for (i = 0; i < sizeof(psta->bssrateset); i++) {
- if (psta->bssrateset[i])
- tx_ra_bitmap |= rtw_get_bit_value_from_ieee_value(psta->bssrateset[i] & 0x7f);
- }
- /* n mode ra_bitmap */
- if (psta_ht->ht_option) {
- limit = 8; /* 1R */
-
- for (i = 0; i < limit; i++) {
- if (psta_ht->ht_cap.mcs.rx_mask[i / 8] & BIT(i % 8))
- tx_ra_bitmap |= BIT(i + 12);
- }
-
- /* max short GI rate */
- shortGIrate = psta_ht->sgi;
- }
-
- if (pcur_network->Configuration.DSConfig > 14) {
- sta_band |= WIRELESS_INVALID;
- } else {
- if (tx_ra_bitmap & 0xffff000)
- sta_band |= WIRELESS_11_24N | WIRELESS_11G | WIRELESS_11B;
- else if (tx_ra_bitmap & 0xff0)
- sta_band |= WIRELESS_11G | WIRELESS_11B;
- else
- sta_band |= WIRELESS_11B;
- }
-
- psta->wireless_mode = sta_band;
-
- raid = networktype_to_raid(sta_band);
- init_rate = get_highest_rate_idx(tx_ra_bitmap & 0x0fffffff) & 0x3f;
-
- if (psta->aid < NUM_STA) {
- u8 arg = 0;
-
- arg = psta->mac_id & 0x1f;
-
- arg |= BIT(7);/* support entry 2~31 */
-
- if (shortGIrate)
- arg |= BIT(5);
-
- tx_ra_bitmap |= ((raid << 28) & 0xf0000000);
-
- /* bitmap[0:27] = tx_rate_bitmap */
- /* bitmap[28:31]= Rate Adaptive id */
- /* arg[0:4] = macid */
- /* arg[5] = Short GI */
- rtl8188e_Add_RateATid(padapter, tx_ra_bitmap, arg, rssi_level);
-
- if (shortGIrate)
- init_rate |= BIT(6);
-
- /* set ra_id, init_rate */
- psta->raid = raid;
- psta->init_rate = init_rate;
- }
-}
-
-void update_bmc_sta(struct adapter *padapter)
-{
- u32 init_rate = 0;
- unsigned char network_type, raid;
- int i, supportRateNum = 0;
- unsigned int tx_ra_bitmap = 0;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_bssid_ex *pcur_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
- struct sta_info *psta = rtw_get_bcmc_stainfo(padapter);
-
- if (psta) {
- psta->aid = 0;/* default set to 0 */
- psta->mac_id = psta->aid + 1;
-
- psta->qos_option = 0;
- psta->htpriv.ht_option = false;
-
- psta->ieee8021x_blocked = 0;
-
- memset((void *)&psta->sta_stats, 0, sizeof(struct stainfo_stats));
-
- /* prepare for add_RATid */
- supportRateNum = rtw_get_rateset_len((u8 *)&pcur_network->SupportedRates);
- network_type = rtw_check_network_type((u8 *)&pcur_network->SupportedRates, supportRateNum, 1);
-
- memcpy(psta->bssrateset, &pcur_network->SupportedRates, supportRateNum);
- psta->bssratelen = supportRateNum;
-
- /* b/g mode ra_bitmap */
- for (i = 0; i < supportRateNum; i++) {
- if (psta->bssrateset[i])
- tx_ra_bitmap |= rtw_get_bit_value_from_ieee_value(psta->bssrateset[i] & 0x7f);
- }
-
- if (pcur_network->Configuration.DSConfig > 14) {
- network_type = WIRELESS_INVALID;
- } else {
- /* force to b mode */
- network_type = WIRELESS_11B;
- tx_ra_bitmap = 0xf;
- }
-
- raid = networktype_to_raid(network_type);
- init_rate = get_highest_rate_idx(tx_ra_bitmap & 0x0fffffff) & 0x3f;
-
- /* ap mode */
- rtl8188e_SetHalODMVar(padapter, psta, true);
-
- {
- u8 arg = 0;
-
- arg = psta->mac_id & 0x1f;
- arg |= BIT(7);
- tx_ra_bitmap |= ((raid << 28) & 0xf0000000);
-
- /* bitmap[0:27] = tx_rate_bitmap */
- /* bitmap[28:31]= Rate Adaptive id */
- /* arg[0:4] = macid */
- /* arg[5] = Short GI */
- rtl8188e_Add_RateATid(padapter, tx_ra_bitmap, arg, 0);
- }
- /* set ra_id, init_rate */
- psta->raid = raid;
- psta->init_rate = init_rate;
-
- rtw_sta_media_status_rpt(padapter, psta, 1);
-
- spin_lock_bh(&psta->lock);
- psta->state = _FW_LINKED;
- spin_unlock_bh(&psta->lock);
- }
-}
-
-/* notes: */
-/* AID: 1~MAX for sta and 0 for bc/mc in ap/adhoc mode */
-/* MAC_ID = AID+1 for sta in ap/adhoc mode */
-/* MAC_ID = 1 for bc/mc for sta/ap/adhoc */
-/* MAC_ID = 0 for bssid for sta/ap/adhoc */
-/* CAM_ID = 0~3 for default key, cmd_id = macid + 3, macid = aid+1; */
-
-void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;
- struct ht_priv *phtpriv_sta = &psta->htpriv;
- u16 sta_cap_info;
- u16 ap_cap_info;
-
- psta->mac_id = psta->aid + 1;
-
- /* ap mode */
- rtl8188e_SetHalODMVar(padapter, psta, true);
-
- if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
- psta->ieee8021x_blocked = true;
- else
- psta->ieee8021x_blocked = false;
-
- /* update sta's cap */
-
- /* ERP */
- VCS_update(padapter, psta);
- /* HT related cap */
- if (phtpriv_sta->ht_option) {
- /* check if sta supports rx ampdu */
- phtpriv_sta->ampdu_enable = phtpriv_ap->ampdu_enable;
- sta_cap_info = le16_to_cpu(phtpriv_sta->ht_cap.cap_info);
- ap_cap_info = le16_to_cpu(phtpriv_ap->ht_cap.cap_info);
-
- /* check if sta support s Short GI */
- if ((sta_cap_info & ap_cap_info) &
- (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40))
- phtpriv_sta->sgi = true;
-
- /* bwmode */
- if ((sta_cap_info & ap_cap_info) & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
- phtpriv_sta->bwmode = pmlmeext->cur_bwmode;
- phtpriv_sta->ch_offset = pmlmeext->cur_ch_offset;
- }
- psta->qos_option = true;
- } else {
- phtpriv_sta->ampdu_enable = false;
- phtpriv_sta->sgi = false;
- phtpriv_sta->bwmode = HT_CHANNEL_WIDTH_20;
- phtpriv_sta->ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- }
-
- /* Rx AMPDU */
- send_delba(padapter, 0, psta->hwaddr);/* recipient */
-
- /* TX AMPDU */
- send_delba(padapter, 1, psta->hwaddr);/* originator */
- phtpriv_sta->agg_enable_bitmap = 0x0;/* reset */
- phtpriv_sta->candidate_tid_bitmap = 0x0;/* reset */
-
- /* todo: init other variables */
-
- memset((void *)&psta->sta_stats, 0, sizeof(struct stainfo_stats));
-
- spin_lock_bh(&psta->lock);
- psta->state |= _FW_LINKED;
- spin_unlock_bh(&psta->lock);
-}
-
-static void update_bcn_erpinfo_ie(struct adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
- unsigned char *p, *ie = pnetwork->IEs;
- u32 len = 0;
-
- if (!pmlmeinfo->ERP_enable)
- return;
-
- /* parsing ERP_IE */
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _ERPINFO_IE_, &len,
- (pnetwork->IELength - _BEACON_IE_OFFSET_));
- if (p && len > 0) {
- struct ndis_802_11_var_ie *pIE = (struct ndis_802_11_var_ie *)p;
-
- if (pmlmepriv->num_sta_non_erp == 1)
- pIE->data[0] |= RTW_ERP_INFO_NON_ERP_PRESENT | RTW_ERP_INFO_USE_PROTECTION;
- else
- pIE->data[0] &= ~(RTW_ERP_INFO_NON_ERP_PRESENT | RTW_ERP_INFO_USE_PROTECTION);
-
- if (pmlmepriv->num_sta_no_short_preamble > 0)
- pIE->data[0] |= RTW_ERP_INFO_BARKER_PREAMBLE_MODE;
- else
- pIE->data[0] &= ~(RTW_ERP_INFO_BARKER_PREAMBLE_MODE);
-
- ERP_IE_handler(padapter, pIE);
- }
-}
-
-static void update_bcn_wps_ie(struct adapter *padapter)
-{
- u8 *pwps_ie = NULL, *pwps_ie_src;
- u8 *premainder_ie, *pbackup_remainder_ie = NULL;
- uint wps_ielen = 0, wps_offset, remainder_ielen;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
- unsigned char *ie = pnetwork->IEs;
- u32 ielen = pnetwork->IELength;
-
- pwps_ie = rtw_get_wps_ie(ie + _FIXED_IE_LENGTH_, ielen - _FIXED_IE_LENGTH_, NULL, &wps_ielen);
-
- if (!pwps_ie || wps_ielen == 0)
- return;
-
- wps_offset = (uint)(pwps_ie - ie);
-
- premainder_ie = pwps_ie + wps_ielen;
-
- remainder_ielen = ielen - wps_offset - wps_ielen;
-
- if (remainder_ielen > 0) {
- pbackup_remainder_ie = kmalloc(remainder_ielen, GFP_ATOMIC);
- if (pbackup_remainder_ie)
- memcpy(pbackup_remainder_ie, premainder_ie, remainder_ielen);
- }
-
- pwps_ie_src = pmlmepriv->wps_beacon_ie;
- if (!pwps_ie_src)
- goto exit;
-
- wps_ielen = (uint)pwps_ie_src[1];/* to get ie data len */
- if ((wps_offset + wps_ielen + 2 + remainder_ielen) <= MAX_IE_SZ) {
- memcpy(pwps_ie, pwps_ie_src, wps_ielen + 2);
- pwps_ie += (wps_ielen + 2);
-
- if (pbackup_remainder_ie)
- memcpy(pwps_ie, pbackup_remainder_ie, remainder_ielen);
-
- /* update IELength */
- pnetwork->IELength = wps_offset + (wps_ielen + 2) + remainder_ielen;
- }
-
-exit:
- kfree(pbackup_remainder_ie);
-}
-
-static void update_bcn_vendor_spec_ie(struct adapter *padapter, u8 *oui)
-{
- if (!memcmp(WPS_OUI, oui, 4))
- update_bcn_wps_ie(padapter);
-}
-
-void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
-{
- struct mlme_priv *pmlmepriv;
- struct mlme_ext_priv *pmlmeext;
-
- if (!padapter)
- return;
-
- pmlmepriv = &padapter->mlmepriv;
- pmlmeext = &padapter->mlmeextpriv;
-
- if (!pmlmeext->bstart_bss)
- return;
-
- spin_lock_bh(&pmlmepriv->bcn_update_lock);
-
- switch (ie_id) {
- case _TIM_IE_:
- update_BCNTIM(padapter);
- break;
- case _ERPINFO_IE_:
- update_bcn_erpinfo_ie(padapter);
- break;
- case _VENDOR_SPECIFIC_IE_:
- update_bcn_vendor_spec_ie(padapter, oui);
- break;
- default:
- break;
- }
-
- pmlmepriv->update_bcn = true;
-
- spin_unlock_bh(&pmlmepriv->bcn_update_lock);
-
- if (tx)
- set_tx_beacon_cmd(padapter);
-}
-
-/* op_mode
- * Set to 0 (HT pure) under the following conditions
- * - all STAs in the BSS are 20/40 MHz HT in 20/40 MHz BSS or
- * - all STAs in the BSS are 20 MHz HT in 20 MHz BSS
- * Set to 1 (HT non-member protection) if there may be non-HT STAs
- * in both the primary and the secondary channel
- * Set to 2 if only HT STAs are associated in BSS,
- * however and at least one 20 MHz HT STA is associated
- * Set to 3 (HT mixed mode) when one or more non-HT STAs are associated
- * (currently non-GF HT station is considered as non-HT STA also)
- */
-static int rtw_ht_operation_update(struct adapter *padapter)
-{
- u16 cur_op_mode, new_op_mode;
- int op_mode_changes = 0;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;
-
- if (pmlmepriv->htpriv.ht_option)
- return 0;
-
- if (!(pmlmepriv->ht_op_mode & HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT) &&
- pmlmepriv->num_sta_ht_no_gf) {
- pmlmepriv->ht_op_mode |=
- HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT;
- op_mode_changes++;
- } else if ((pmlmepriv->ht_op_mode &
- HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT) &&
- pmlmepriv->num_sta_ht_no_gf == 0) {
- pmlmepriv->ht_op_mode &=
- ~HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT;
- op_mode_changes++;
- }
-
- if (!(pmlmepriv->ht_op_mode & HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT) &&
- (pmlmepriv->num_sta_no_ht || pmlmepriv->olbc_ht)) {
- pmlmepriv->ht_op_mode |= HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT;
- op_mode_changes++;
- } else if ((pmlmepriv->ht_op_mode &
- HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT) &&
- (pmlmepriv->num_sta_no_ht == 0 && !pmlmepriv->olbc_ht)) {
- pmlmepriv->ht_op_mode &=
- ~HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT;
- op_mode_changes++;
- }
-
- /* Note: currently we switch to the MIXED op mode if HT non-greenfield
- * station is associated. Probably it's a theoretical case, since
- * it looks like all known HT STAs support greenfield.
- */
- new_op_mode = 0;
- if (pmlmepriv->num_sta_no_ht ||
- (pmlmepriv->ht_op_mode & HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT))
- new_op_mode = OP_MODE_MIXED;
- else if ((le16_to_cpu(phtpriv_ap->ht_cap.cap_info) &
- IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
- pmlmepriv->num_sta_ht_20mhz)
- new_op_mode = OP_MODE_20MHZ_HT_STA_ASSOCED;
- else if (pmlmepriv->olbc_ht)
- new_op_mode = OP_MODE_MAY_BE_LEGACY_STAS;
- else
- new_op_mode = OP_MODE_PURE;
-
- cur_op_mode = pmlmepriv->ht_op_mode & HT_INFO_OPERATION_MODE_OP_MODE_MASK;
- if (cur_op_mode != new_op_mode) {
- pmlmepriv->ht_op_mode &= ~HT_INFO_OPERATION_MODE_OP_MODE_MASK;
- pmlmepriv->ht_op_mode |= new_op_mode;
- op_mode_changes++;
- }
-
- return op_mode_changes;
-}
-
-void associated_clients_update(struct adapter *padapter, u8 updated)
-{
- /* update associated stations cap. */
- if (updated) {
- struct list_head *phead, *plist;
- struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
-
- phead = &pstapriv->asoc_list;
- plist = phead->next;
-
- /* check asoc_queue */
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, asoc_list);
-
- plist = plist->next;
-
- VCS_update(padapter, psta);
- }
- spin_unlock_bh(&pstapriv->asoc_list_lock);
- }
-}
-
-/* called > TSR LEVEL for USB or SDIO Interface*/
-void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
-{
- u8 beacon_updated = false;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- if (!(psta->flags & WLAN_STA_SHORT_PREAMBLE)) {
- if (!psta->no_short_preamble_set) {
- psta->no_short_preamble_set = 1;
-
- pmlmepriv->num_sta_no_short_preamble++;
-
- if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
- (pmlmepriv->num_sta_no_short_preamble == 1)) {
- beacon_updated = true;
- update_beacon(padapter, 0xFF, NULL, true);
- }
- }
- } else {
- if (psta->no_short_preamble_set) {
- psta->no_short_preamble_set = 0;
-
- pmlmepriv->num_sta_no_short_preamble--;
-
- if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
- (pmlmepriv->num_sta_no_short_preamble == 0)) {
- beacon_updated = true;
- update_beacon(padapter, 0xFF, NULL, true);
- }
- }
- }
-
- if (psta->flags & WLAN_STA_NONERP) {
- if (!psta->nonerp_set) {
- psta->nonerp_set = 1;
-
- pmlmepriv->num_sta_non_erp++;
-
- if (pmlmepriv->num_sta_non_erp == 1) {
- beacon_updated = true;
- update_beacon(padapter, _ERPINFO_IE_, NULL, true);
- }
- }
- } else {
- if (psta->nonerp_set) {
- psta->nonerp_set = 0;
-
- pmlmepriv->num_sta_non_erp--;
-
- if (pmlmepriv->num_sta_non_erp == 0) {
- beacon_updated = true;
- update_beacon(padapter, _ERPINFO_IE_, NULL, true);
- }
- }
- }
-
- if (!(psta->capability & WLAN_CAPABILITY_SHORT_SLOT)) {
- if (!psta->no_short_slot_time_set) {
- psta->no_short_slot_time_set = 1;
-
- pmlmepriv->num_sta_no_short_slot_time++;
-
- if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
- (pmlmepriv->num_sta_no_short_slot_time == 1)) {
- beacon_updated = true;
- update_beacon(padapter, 0xFF, NULL, true);
- }
- }
- } else {
- if (psta->no_short_slot_time_set) {
- psta->no_short_slot_time_set = 0;
-
- pmlmepriv->num_sta_no_short_slot_time--;
-
- if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
- (pmlmepriv->num_sta_no_short_slot_time == 0)) {
- beacon_updated = true;
- update_beacon(padapter, 0xFF, NULL, true);
- }
- }
- }
-
- if (psta->flags & WLAN_STA_HT) {
- u16 ht_capab = le16_to_cpu(psta->htpriv.ht_cap.cap_info);
-
- if (psta->no_ht_set) {
- psta->no_ht_set = 0;
- pmlmepriv->num_sta_no_ht--;
- }
-
- if ((ht_capab & IEEE80211_HT_CAP_GRN_FLD) == 0) {
- if (!psta->no_ht_gf_set) {
- psta->no_ht_gf_set = 1;
- pmlmepriv->num_sta_ht_no_gf++;
- }
- }
-
- if ((ht_capab & IEEE80211_HT_CAP_SUP_WIDTH_20_40) == 0) {
- if (!psta->ht_20mhz_set) {
- psta->ht_20mhz_set = 1;
- pmlmepriv->num_sta_ht_20mhz++;
- }
- }
- } else {
- if (!psta->no_ht_set) {
- psta->no_ht_set = 1;
- pmlmepriv->num_sta_no_ht++;
- }
- }
-
- if (rtw_ht_operation_update(padapter) > 0) {
- update_beacon(padapter, _HT_CAPABILITY_IE_, NULL, false);
- update_beacon(padapter, _HT_ADD_INFO_IE_, NULL, true);
- }
-
- /* update associated stations cap. */
- associated_clients_update(padapter, beacon_updated);
-}
-
-u8 bss_cap_update_on_sta_leave(struct adapter *padapter, struct sta_info *psta)
-{
- u8 beacon_updated = false;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- if (!psta)
- return beacon_updated;
-
- if (psta->no_short_preamble_set) {
- psta->no_short_preamble_set = 0;
- pmlmepriv->num_sta_no_short_preamble--;
- if (pmlmeext->cur_wireless_mode > WIRELESS_11B &&
- pmlmepriv->num_sta_no_short_preamble == 0) {
- beacon_updated = true;
- update_beacon(padapter, 0xFF, NULL, true);
- }
- }
-
- if (psta->nonerp_set) {
- psta->nonerp_set = 0;
- pmlmepriv->num_sta_non_erp--;
- if (pmlmepriv->num_sta_non_erp == 0) {
- beacon_updated = true;
- update_beacon(padapter, _ERPINFO_IE_, NULL, true);
- }
- }
-
- if (psta->no_short_slot_time_set) {
- psta->no_short_slot_time_set = 0;
- pmlmepriv->num_sta_no_short_slot_time--;
- if (pmlmeext->cur_wireless_mode > WIRELESS_11B &&
- pmlmepriv->num_sta_no_short_slot_time == 0) {
- beacon_updated = true;
- update_beacon(padapter, 0xFF, NULL, true);
- }
- }
-
- if (psta->no_ht_gf_set) {
- psta->no_ht_gf_set = 0;
- pmlmepriv->num_sta_ht_no_gf--;
- }
-
- if (psta->no_ht_set) {
- psta->no_ht_set = 0;
- pmlmepriv->num_sta_no_ht--;
- }
-
- if (psta->ht_20mhz_set) {
- psta->ht_20mhz_set = 0;
- pmlmepriv->num_sta_ht_20mhz--;
- }
-
- if (rtw_ht_operation_update(padapter) > 0) {
- update_beacon(padapter, _HT_CAPABILITY_IE_, NULL, false);
- update_beacon(padapter, _HT_ADD_INFO_IE_, NULL, true);
- }
-
- /* update associated stations cap. */
-
- return beacon_updated;
-}
-
-void rtw_indicate_sta_assoc_event(struct adapter *padapter, struct sta_info *psta)
-{
- union iwreq_data wrqu;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if (!psta)
- return;
-
- if (psta->aid > NUM_STA)
- return;
-
- if (pstapriv->sta_aid[psta->aid - 1] != psta)
- return;
-
- wrqu.addr.sa_family = ARPHRD_ETHER;
-
- memcpy(wrqu.addr.sa_data, psta->hwaddr, ETH_ALEN);
-
- wireless_send_event(padapter->pnetdev, IWEVREGISTERED, &wrqu, NULL);
-}
-
-static void rtw_indicate_sta_disassoc_event(struct adapter *padapter, struct sta_info *psta)
-{
- union iwreq_data wrqu;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if (!psta)
- return;
-
- if (psta->aid > NUM_STA)
- return;
-
- if (pstapriv->sta_aid[psta->aid - 1] != psta)
- return;
-
- wrqu.addr.sa_family = ARPHRD_ETHER;
-
- memcpy(wrqu.addr.sa_data, psta->hwaddr, ETH_ALEN);
-
- wireless_send_event(padapter->pnetdev, IWEVEXPIRED, &wrqu, NULL);
-}
-
-u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta,
- bool active, u16 reason)
-{
- u8 beacon_updated = false;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if (!psta)
- return beacon_updated;
-
- /* tear down Rx AMPDU */
- send_delba(padapter, 0, psta->hwaddr);/* recipient */
-
- /* tear down TX AMPDU */
- send_delba(padapter, 1, psta->hwaddr);/* originator */
- psta->htpriv.agg_enable_bitmap = 0x0;/* reset */
- psta->htpriv.candidate_tid_bitmap = 0x0;/* reset */
-
- if (active)
- issue_deauth(padapter, psta->hwaddr, reason);
-
- /* clear cam entry / key */
- rtw_clearstakey_cmd(padapter, (u8 *)psta, (u8)(psta->mac_id + 3), true);
-
- spin_lock_bh(&psta->lock);
- psta->state &= ~_FW_LINKED;
- spin_unlock_bh(&psta->lock);
-
- rtw_indicate_sta_disassoc_event(padapter, psta);
-
- report_del_sta_event(padapter, psta->hwaddr, reason);
-
- beacon_updated = bss_cap_update_on_sta_leave(padapter, psta);
-
- spin_lock_bh(&pstapriv->sta_hash_lock);
- rtw_free_stainfo(padapter, psta);
- spin_unlock_bh(&pstapriv->sta_hash_lock);
-
- return beacon_updated;
-}
-
-void rtw_sta_flush(struct adapter *padapter)
-{
- struct list_head *phead, *plist;
- struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
- if ((pmlmeinfo->state & 0x03) != WIFI_FW_AP_STATE)
- return;
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- phead = &pstapriv->asoc_list;
- plist = phead->next;
-
- /* free sta asoc_queue */
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, asoc_list);
-
- plist = plist->next;
-
- list_del_init(&psta->asoc_list);
- pstapriv->asoc_list_cnt--;
-
- ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
- }
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-
- issue_deauth(padapter, bc_addr, WLAN_REASON_DEAUTH_LEAVING);
-
- associated_clients_update(padapter, true);
-}
-
-/* called > TSR LEVEL for USB or SDIO Interface*/
-void sta_info_update(struct adapter *padapter, struct sta_info *psta)
-{
- int flags = psta->flags;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- /* update wmm cap. */
- if (WLAN_STA_WME & flags)
- psta->qos_option = 1;
- else
- psta->qos_option = 0;
-
- if (pmlmepriv->qospriv.qos_option == 0)
- psta->qos_option = 0;
-
- /* update 802.11n ht cap. */
- if (WLAN_STA_HT & flags) {
- psta->htpriv.ht_option = true;
- psta->qos_option = 1;
- } else {
- psta->htpriv.ht_option = false;
- }
-
- if (!pmlmepriv->htpriv.ht_option)
- psta->htpriv.ht_option = false;
-
- update_sta_info_apmode(padapter, psta);
-}
-
-void start_ap_mode(struct adapter *padapter)
-{
- int i;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
-
- pmlmepriv->update_bcn = false;
-
- pmlmeext->bstart_bss = false;
-
- pmlmepriv->num_sta_non_erp = 0;
-
- pmlmepriv->num_sta_no_short_slot_time = 0;
-
- pmlmepriv->num_sta_no_short_preamble = 0;
-
- pmlmepriv->num_sta_ht_no_gf = 0;
- pmlmepriv->num_sta_no_ht = 0;
- pmlmepriv->num_sta_ht_20mhz = 0;
-
- pmlmepriv->olbc = false;
-
- pmlmepriv->olbc_ht = false;
-
- pmlmepriv->ht_op_mode = 0;
-
- for (i = 0; i < NUM_STA; i++)
- pstapriv->sta_aid[i] = NULL;
-
- pmlmepriv->wps_beacon_ie = NULL;
- pmlmepriv->wps_probe_resp_ie = NULL;
- pmlmepriv->wps_assoc_resp_ie = NULL;
-
- pmlmepriv->p2p_beacon_ie = NULL;
- pmlmepriv->p2p_probe_resp_ie = NULL;
-
- /* for ACL */
- INIT_LIST_HEAD(&pacl_list->acl_node_q.queue);
- pacl_list->num = 0;
- pacl_list->mode = 0;
- for (i = 0; i < NUM_ACL; i++) {
- INIT_LIST_HEAD(&pacl_list->aclnode[i].list);
- pacl_list->aclnode[i].valid = false;
- }
-}
-
-void stop_ap_mode(struct adapter *padapter)
-{
- struct list_head *phead, *plist;
- struct rtw_wlan_acl_node *paclnode;
- struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
- struct __queue *pacl_node_q = &pacl_list->acl_node_q;
-
- pmlmepriv->update_bcn = false;
- pmlmeext->bstart_bss = false;
-
- /* reset and init security priv , this can refine with rtw_reset_securitypriv */
- memset((unsigned char *)&padapter->securitypriv, 0, sizeof(struct security_priv));
- padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;
-
- /* for ACL */
- spin_lock_bh(&pacl_node_q->lock);
- phead = get_list_head(pacl_node_q);
- plist = phead->next;
- while (phead != plist) {
- paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
- plist = plist->next;
-
- if (paclnode->valid) {
- paclnode->valid = false;
-
- list_del_init(&paclnode->list);
-
- pacl_list->num--;
- }
- }
- spin_unlock_bh(&pacl_node_q->lock);
-
- rtw_sta_flush(padapter);
-
- /* free_assoc_sta_resources */
- rtw_free_all_stainfo(padapter);
-
- psta = rtw_get_bcmc_stainfo(padapter);
- spin_lock_bh(&pstapriv->sta_hash_lock);
- rtw_free_stainfo(padapter, psta);
- spin_unlock_bh(&pstapriv->sta_hash_lock);
-
- rtw_init_bcmc_stainfo(padapter);
-
- rtw_free_mlme_priv_ie_data(pmlmepriv);
-}
diff --git a/drivers/staging/r8188eu/core/rtw_br_ext.c b/drivers/staging/r8188eu/core/rtw_br_ext.c
deleted file mode 100644
index a7c67014dde0..000000000000
--- a/drivers/staging/r8188eu/core/rtw_br_ext.c
+++ /dev/null
@@ -1,658 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. i*/
-
-#define _RTW_BR_EXT_C_
-
-#include "../include/linux/if_arp.h"
-#include "../include/net/ip.h"
-#include "../include/linux/atalk.h"
-#include "../include/linux/udp.h"
-#include "../include/linux/if_pppox.h"
-
-#include "../include/drv_types.h"
-#include "../include/rtw_br_ext.h"
-#include "../include/usb_osintf.h"
-
-#ifndef csum_ipv6_magic
-#include "../include/net/ip6_checksum.h"
-#endif
-
-#include "../include/linux/ipv6.h"
-#include "../include/linux/icmpv6.h"
-#include "../include/net/ndisc.h"
-#include "../include/net/checksum.h"
-
-#define NAT25_IPV4 01
-#define NAT25_IPV6 02
-#define NAT25_IPX 03
-#define NAT25_APPLE 04
-#define NAT25_PPPOE 05
-
-#define RTL_RELAY_TAG_LEN (ETH_ALEN)
-#define TAG_HDR_LEN 4
-
-#define MAGIC_CODE 0x8186
-#define MAGIC_CODE_LEN 2
-#define WAIT_TIME_PPPOE 5 /* waiting time for pppoe server in sec */
-
-/*-----------------------------------------------------------------
- How database records network address:
- 0 1 2 3 4 5 6 7 8 9 10
- |----|----|----|----|----|----|----|----|----|----|----|
- IPv4 |type| | IP addr |
- IPX |type| Net addr | Node addr |
- IPX |type| Net addr |Sckt addr|
- Apple |type| Network |node|
- PPPoE |type| SID | AC MAC |
------------------------------------------------------------------*/
-
-/* Find a tag in pppoe frame and return the pointer */
-static unsigned char *__nat25_find_pppoe_tag(struct pppoe_hdr *ph, unsigned short type)
-{
- unsigned char *cur_ptr, *start_ptr;
- unsigned short tag_len, tag_type;
-
- start_ptr = (unsigned char *)ph->tag;
- cur_ptr = (unsigned char *)ph->tag;
- while ((cur_ptr - start_ptr) < ntohs(ph->length)) {
- /* prevent un-alignment access */
- tag_type = (unsigned short)((cur_ptr[0] << 8) + cur_ptr[1]);
- tag_len = (unsigned short)((cur_ptr[2] << 8) + cur_ptr[3]);
- if (tag_type == type)
- return cur_ptr;
- cur_ptr = cur_ptr + TAG_HDR_LEN + tag_len;
- }
- return NULL;
-}
-
-static int __nat25_add_pppoe_tag(struct sk_buff *skb, struct pppoe_tag *tag)
-{
- struct pppoe_hdr *ph = (struct pppoe_hdr *)(skb->data + ETH_HLEN);
- int data_len;
-
- data_len = be16_to_cpu(tag->tag_len) + TAG_HDR_LEN;
- if (skb_tailroom(skb) < data_len)
- return -1;
-
- skb_put(skb, data_len);
- /* have a room for new tag */
- memmove(((unsigned char *)ph->tag + data_len), (unsigned char *)ph->tag, ntohs(ph->length));
- ph->length = htons(ntohs(ph->length) + data_len);
- memcpy((unsigned char *)ph->tag, tag, data_len);
- return data_len;
-}
-
-static int skb_pull_and_merge(struct sk_buff *skb, unsigned char *src, int len)
-{
- int tail_len;
- unsigned long end, tail;
-
- if ((src + len) > skb_tail_pointer(skb) || skb->len < len)
- return -1;
-
- tail = (unsigned long)skb_tail_pointer(skb);
- end = (unsigned long)src + len;
- if (tail < end)
- return -1;
-
- tail_len = (int)(tail - end);
- if (tail_len > 0)
- memmove(src, src + len, tail_len);
-
- skb_trim(skb, skb->len - len);
- return 0;
-}
-
-static int __nat25_has_expired(struct nat25_network_db_entry *fdb)
-{
- if (time_before_eq(fdb->ageing_timer, jiffies - NAT25_AGEING_TIME * HZ))
- return 1;
-
- return 0;
-}
-
-static void __nat25_generate_ipv4_network_addr(unsigned char *addr,
- unsigned int *ip_addr)
-{
- memset(addr, 0, MAX_NETWORK_ADDR_LEN);
-
- addr[0] = NAT25_IPV4;
- memcpy(addr + 7, (unsigned char *)ip_addr, 4);
-}
-
-static void __nat25_generate_pppoe_network_addr(unsigned char *addr,
- unsigned char *ac_mac, __be16 *sid)
-{
- memset(addr, 0, MAX_NETWORK_ADDR_LEN);
-
- addr[0] = NAT25_PPPOE;
- memcpy(addr + 1, (unsigned char *)sid, 2);
- memcpy(addr + 3, (unsigned char *)ac_mac, 6);
-}
-
-static void __nat25_generate_ipv6_network_addr(unsigned char *addr,
- unsigned int *ip_addr)
-{
- memset(addr, 0, MAX_NETWORK_ADDR_LEN);
-
- addr[0] = NAT25_IPV6;
- memcpy(addr + 1, (unsigned char *)ip_addr, 16);
-}
-
-static unsigned char *scan_tlv(unsigned char *data, int len, unsigned char tag, unsigned char len8b)
-{
- while (len > 0) {
- if (*data == tag && *(data + 1) == len8b && len >= len8b * 8)
- return data + 2;
-
- len -= (*(data + 1)) * 8;
- data += (*(data + 1)) * 8;
- }
- return NULL;
-}
-
-static int update_nd_link_layer_addr(unsigned char *data, int len, unsigned char *replace_mac)
-{
- struct icmp6hdr *icmphdr = (struct icmp6hdr *)data;
- unsigned char *mac;
-
- if (icmphdr->icmp6_type == NDISC_ROUTER_SOLICITATION) {
- if (len >= 8) {
- mac = scan_tlv(&data[8], len - 8, 1, 1);
- if (mac) {
- memcpy(mac, replace_mac, 6);
- return 1;
- }
- }
- } else if (icmphdr->icmp6_type == NDISC_ROUTER_ADVERTISEMENT) {
- if (len >= 16) {
- mac = scan_tlv(&data[16], len - 16, 1, 1);
- if (mac) {
- memcpy(mac, replace_mac, 6);
- return 1;
- }
- }
- } else if (icmphdr->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
- if (len >= 24) {
- mac = scan_tlv(&data[24], len - 24, 1, 1);
- if (mac) {
- memcpy(mac, replace_mac, 6);
- return 1;
- }
- }
- } else if (icmphdr->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
- if (len >= 24) {
- mac = scan_tlv(&data[24], len - 24, 2, 1);
- if (mac) {
- memcpy(mac, replace_mac, 6);
- return 1;
- }
- }
- } else if (icmphdr->icmp6_type == NDISC_REDIRECT) {
- if (len >= 40) {
- mac = scan_tlv(&data[40], len - 40, 2, 1);
- if (mac) {
- memcpy(mac, replace_mac, 6);
- return 1;
- }
- }
- }
- return 0;
-}
-
-static int __nat25_network_hash(unsigned char *addr)
-{
- if (addr[0] == NAT25_IPV4) {
- unsigned long x;
-
- x = addr[7] ^ addr[8] ^ addr[9] ^ addr[10];
-
- return x & (NAT25_HASH_SIZE - 1);
- } else if (addr[0] == NAT25_IPX) {
- unsigned long x;
-
- x = addr[1] ^ addr[2] ^ addr[3] ^ addr[4] ^ addr[5] ^
- addr[6] ^ addr[7] ^ addr[8] ^ addr[9] ^ addr[10];
-
- return x & (NAT25_HASH_SIZE - 1);
- } else if (addr[0] == NAT25_APPLE) {
- unsigned long x;
-
- x = addr[1] ^ addr[2] ^ addr[3];
-
- return x & (NAT25_HASH_SIZE - 1);
- } else if (addr[0] == NAT25_PPPOE) {
- unsigned long x;
-
- x = addr[0] ^ addr[1] ^ addr[2] ^ addr[3] ^ addr[4] ^
- addr[5] ^ addr[6] ^ addr[7] ^ addr[8];
-
- return x & (NAT25_HASH_SIZE - 1);
- } else if (addr[0] == NAT25_IPV6) {
- unsigned long x;
-
- x = addr[1] ^ addr[2] ^ addr[3] ^ addr[4] ^ addr[5] ^ addr[6] ^
- addr[7] ^ addr[8] ^ addr[9] ^ addr[10] ^ addr[11] ^ addr[12] ^
- addr[13] ^ addr[14] ^ addr[15] ^ addr[16];
-
- return x & (NAT25_HASH_SIZE - 1);
- } else {
- unsigned long x = 0;
- int i;
-
- for (i = 0; i < MAX_NETWORK_ADDR_LEN; i++)
- x ^= addr[i];
-
- return x & (NAT25_HASH_SIZE - 1);
- }
-}
-
-static void __network_hash_link(struct adapter *priv,
- struct nat25_network_db_entry *ent, int hash)
-{
- /* Caller must spin_lock already! */
- ent->next_hash = priv->nethash[hash];
- if (ent->next_hash)
- ent->next_hash->pprev_hash = &ent->next_hash;
- priv->nethash[hash] = ent;
- ent->pprev_hash = &priv->nethash[hash];
-}
-
-static void __network_hash_unlink(struct nat25_network_db_entry *ent)
-{
- /* Caller must spin_lock already! */
- *ent->pprev_hash = ent->next_hash;
- if (ent->next_hash)
- ent->next_hash->pprev_hash = ent->pprev_hash;
- ent->next_hash = NULL;
- ent->pprev_hash = NULL;
-}
-
-static void __nat25_db_network_insert(struct adapter *priv,
- unsigned char *mac_addr, unsigned char *addr)
-{
- struct nat25_network_db_entry *db;
- int hash;
-
- spin_lock_bh(&priv->br_ext_lock);
- hash = __nat25_network_hash(addr);
- db = priv->nethash[hash];
- while (db) {
- if (!memcmp(db->networkAddr, addr, MAX_NETWORK_ADDR_LEN)) {
- memcpy(db->macAddr, mac_addr, ETH_ALEN);
- db->ageing_timer = jiffies;
- spin_unlock_bh(&priv->br_ext_lock);
- return;
- }
- db = db->next_hash;
- }
- db = kmalloc(sizeof(*db), GFP_ATOMIC);
- if (!db) {
- spin_unlock_bh(&priv->br_ext_lock);
- return;
- }
- memcpy(db->networkAddr, addr, MAX_NETWORK_ADDR_LEN);
- memcpy(db->macAddr, mac_addr, ETH_ALEN);
- atomic_set(&db->use_count, 1);
- db->ageing_timer = jiffies;
-
- __network_hash_link(priv, db, hash);
-
- spin_unlock_bh(&priv->br_ext_lock);
-}
-
-/*
- * NAT2.5 interface
- */
-
-void nat25_db_cleanup(struct adapter *priv)
-{
- int i;
-
- spin_lock_bh(&priv->br_ext_lock);
-
- for (i = 0; i < NAT25_HASH_SIZE; i++) {
- struct nat25_network_db_entry *f;
-
- f = priv->nethash[i];
- while (f) {
- struct nat25_network_db_entry *g;
-
- g = f->next_hash;
- if (priv->scdb_entry == f) {
- memset(priv->scdb_mac, 0, ETH_ALEN);
- memset(priv->scdb_ip, 0, 4);
- priv->scdb_entry = NULL;
- }
- __network_hash_unlink(f);
- kfree(f);
- f = g;
- }
- }
- spin_unlock_bh(&priv->br_ext_lock);
-}
-
-void nat25_db_expire(struct adapter *priv)
-{
- int i;
-
- spin_lock_bh(&priv->br_ext_lock);
-
- for (i = 0; i < NAT25_HASH_SIZE; i++) {
- struct nat25_network_db_entry *f;
-
- f = priv->nethash[i];
- while (f) {
- struct nat25_network_db_entry *g;
-
- g = f->next_hash;
- if (__nat25_has_expired(f)) {
- if (atomic_dec_and_test(&f->use_count)) {
- if (priv->scdb_entry == f) {
- memset(priv->scdb_mac, 0, ETH_ALEN);
- memset(priv->scdb_ip, 0, 4);
- priv->scdb_entry = NULL;
- }
- __network_hash_unlink(f);
- kfree(f);
- }
- }
- f = g;
- }
- }
- spin_unlock_bh(&priv->br_ext_lock);
-}
-
-int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
-{
- unsigned short protocol;
- unsigned char addr[MAX_NETWORK_ADDR_LEN];
- unsigned int tmp;
-
- if (!skb)
- return -1;
-
- if ((method <= NAT25_MIN) || (method >= NAT25_MAX))
- return -1;
-
- protocol = be16_to_cpu(*((__be16 *)(skb->data + 2 * ETH_ALEN)));
-
- /*---------------------------------------------------*/
- /* Handle IP frame */
- /*---------------------------------------------------*/
- if (protocol == ETH_P_IP) {
- struct iphdr *iph = (struct iphdr *)(skb->data + ETH_HLEN);
-
- if (((unsigned char *)(iph) + (iph->ihl << 2)) >= (skb->data + ETH_HLEN + skb->len))
- return -1;
-
- switch (method) {
- case NAT25_CHECK:
- return -1;
- case NAT25_INSERT:
- /* some multicast with source IP is all zero, maybe other case is illegal */
- /* in class A, B, C, host address is all zero or all one is illegal */
- if (iph->saddr == 0)
- return 0;
- tmp = be32_to_cpu(iph->saddr);
- __nat25_generate_ipv4_network_addr(addr, &tmp);
- /* record source IP address and , source mac address into db */
- __nat25_db_network_insert(priv, skb->data + ETH_ALEN, addr);
- return 0;
- default:
- return -1;
- }
- } else if (protocol == ETH_P_ARP) {
- /*---------------------------------------------------*/
- /* Handle ARP frame */
- /*---------------------------------------------------*/
- struct arphdr *arp = (struct arphdr *)(skb->data + ETH_HLEN);
- unsigned char *arp_ptr = (unsigned char *)(arp + 1);
- unsigned int *sender;
-
- if (arp->ar_pro != htons(ETH_P_IP))
- return -1;
-
- switch (method) {
- case NAT25_CHECK:
- return 0; /* skb_copy for all ARP frame */
- case NAT25_INSERT:
- /* change to ARP sender mac address to wlan STA address */
- memcpy(arp_ptr, GET_MY_HWADDR(priv), ETH_ALEN);
- arp_ptr += arp->ar_hln;
- sender = (unsigned int *)arp_ptr;
- __nat25_generate_ipv4_network_addr(addr, sender);
- __nat25_db_network_insert(priv, skb->data + ETH_ALEN, addr);
- return 0;
- default:
- return -1;
- }
- } else if ((protocol == ETH_P_PPP_DISC) ||
- (protocol == ETH_P_PPP_SES)) {
- /*---------------------------------------------------*/
- /* Handle PPPoE frame */
- /*---------------------------------------------------*/
- struct pppoe_hdr *ph = (struct pppoe_hdr *)(skb->data + ETH_HLEN);
- __be16 *pMagic;
-
- switch (method) {
- case NAT25_CHECK:
- if (ph->sid == 0)
- return 0;
- return 1;
- case NAT25_INSERT:
- if (ph->sid == 0) { /* Discovery phase according to tag */
- if (ph->code == PADI_CODE || ph->code == PADR_CODE) {
- if (priv->ethBrExtInfo.addPPPoETag) {
- struct pppoe_tag *tag, *pOldTag;
- unsigned char tag_buf[40];
- int old_tag_len = 0;
-
- tag = (struct pppoe_tag *)tag_buf;
- pOldTag = (struct pppoe_tag *)__nat25_find_pppoe_tag(ph, ntohs(PTT_RELAY_SID));
-					if (pOldTag) { /* if a SID tag already exists, copy the old value and delete it */
- old_tag_len = ntohs(pOldTag->tag_len);
- if (old_tag_len +
- TAG_HDR_LEN +
- MAGIC_CODE_LEN +
- RTL_RELAY_TAG_LEN >
- sizeof(tag_buf))
- return -1;
-
- memcpy(tag->tag_data + MAGIC_CODE_LEN + RTL_RELAY_TAG_LEN,
- pOldTag->tag_data, old_tag_len);
-
- if (skb_pull_and_merge(skb, (unsigned char *)pOldTag, TAG_HDR_LEN + old_tag_len) < 0)
- return -1;
-
- ph->length = htons(ntohs(ph->length) - TAG_HDR_LEN - old_tag_len);
- }
-
- tag->tag_type = PTT_RELAY_SID;
- tag->tag_len = htons(MAGIC_CODE_LEN + RTL_RELAY_TAG_LEN + old_tag_len);
-
- /* insert the magic_code+client mac in relay tag */
- pMagic = (__be16 *)tag->tag_data;
- *pMagic = htons(MAGIC_CODE);
- memcpy(tag->tag_data + MAGIC_CODE_LEN, skb->data + ETH_ALEN, ETH_ALEN);
-
- /* Add relay tag */
- if (__nat25_add_pppoe_tag(skb, tag) < 0)
- return -1;
-				} else { /* do not add relay tag */
- if (priv->pppoe_connection_in_progress &&
- memcmp(skb->data + ETH_ALEN,
- priv->pppoe_addr,
- ETH_ALEN))
- return -2;
-
- if (priv->pppoe_connection_in_progress == 0)
- memcpy(priv->pppoe_addr, skb->data + ETH_ALEN, ETH_ALEN);
-
- priv->pppoe_connection_in_progress = WAIT_TIME_PPPOE;
- }
- } else {
- return -1;
- }
- } else { /* session phase */
- __nat25_generate_pppoe_network_addr(addr, skb->data, &ph->sid);
-
- __nat25_db_network_insert(priv, skb->data + ETH_ALEN, addr);
-
- if (!priv->ethBrExtInfo.addPPPoETag &&
- priv->pppoe_connection_in_progress &&
- !memcmp(skb->data + ETH_ALEN, priv->pppoe_addr, ETH_ALEN))
- priv->pppoe_connection_in_progress = 0;
- }
- return 0;
- default:
- return -1;
- }
- } else if (protocol == 0x888e) {
- /*---------------------------------------------------*/
- /* Handle EAP frame */
- /*---------------------------------------------------*/
- switch (method) {
- case NAT25_CHECK:
- return -1;
- case NAT25_INSERT:
- return 0;
- default:
- return -1;
- }
- } else if ((protocol == 0xe2ae) || (protocol == 0xe2af)) {
- /*---------------------------------------------------*/
- /* Handle C-Media proprietary frame */
- /*---------------------------------------------------*/
- switch (method) {
- case NAT25_CHECK:
- return -1;
- case NAT25_INSERT:
- return 0;
- default:
- return -1;
- }
- } else if (protocol == ETH_P_IPV6) {
- /*------------------------------------------------*/
- /* Handle IPV6 frame */
- /*------------------------------------------------*/
- struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + ETH_HLEN);
-
- if (sizeof(*iph) >= (skb->len - ETH_HLEN))
- return -1;
-
- switch (method) {
- case NAT25_CHECK:
- if (skb->data[0] & 1)
- return 0;
- return -1;
- case NAT25_INSERT:
- if (memcmp(&iph->saddr, "\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0", 16)) {
- __nat25_generate_ipv6_network_addr(addr, (unsigned int *)&iph->saddr);
- __nat25_db_network_insert(priv, skb->data + ETH_ALEN, addr);
-
- if (iph->nexthdr == IPPROTO_ICMPV6 &&
- skb->len > (ETH_HLEN + sizeof(*iph) + 4)) {
- if (update_nd_link_layer_addr(skb->data + ETH_HLEN + sizeof(*iph),
- skb->len - ETH_HLEN - sizeof(*iph), GET_MY_HWADDR(priv))) {
- struct icmp6hdr *hdr = (struct icmp6hdr *)(skb->data + ETH_HLEN + sizeof(*iph));
- hdr->icmp6_cksum = 0;
- hdr->icmp6_cksum = csum_ipv6_magic(&iph->saddr, &iph->daddr,
- be16_to_cpu(iph->payload_len),
- IPPROTO_ICMPV6,
- csum_partial((__u8 *)hdr,
- be16_to_cpu(iph->payload_len),
- 0));
- }
- }
- }
- return 0;
- default:
- return -1;
- }
- }
- return -1;
-}
-
-#define SERVER_PORT 67
-#define CLIENT_PORT 68
-#define DHCP_MAGIC 0x63825363
-#define BROADCAST_FLAG 0x8000
-
-struct dhcpMessage {
- u_int8_t op;
- u_int8_t htype;
- u_int8_t hlen;
- u_int8_t hops;
- u_int32_t xid;
- __be16 secs;
- __be16 flags;
- __be32 ciaddr;
- __be32 yiaddr;
- __be32 siaddr;
- __be32 giaddr;
- u_int8_t chaddr[16];
- u_int8_t sname[64];
- u_int8_t file[128];
- __be32 cookie;
- u_int8_t options[308]; /* 312 - cookie */
-};
-
-void dhcp_flag_bcast(struct adapter *priv, struct sk_buff *skb)
-{
- if (!skb)
- return;
-
- if (!priv->ethBrExtInfo.dhcp_bcst_disable) {
- __be16 protocol = *((__be16 *)(skb->data + 2 * ETH_ALEN));
-
- if (protocol == htons(ETH_P_IP)) { /* IP */
- struct iphdr *iph = (struct iphdr *)(skb->data + ETH_HLEN);
-
- if (iph->protocol == IPPROTO_UDP) { /* UDP */
- struct udphdr *udph = (void *)iph + (iph->ihl << 2);
-
- if ((udph->source == htons(CLIENT_PORT)) &&
- (udph->dest == htons(SERVER_PORT))) { /* DHCP request */
- struct dhcpMessage *dhcph = (void *)udph + sizeof(struct udphdr);
- u32 cookie = be32_to_cpu(dhcph->cookie);
-
- if (cookie == DHCP_MAGIC) { /* match magic word */
- if (!(dhcph->flags & htons(BROADCAST_FLAG))) {
- /* if not broadcast */
- register int sum = 0;
-
-							/* OR in the BROADCAST flag */
- dhcph->flags |= htons(BROADCAST_FLAG);
- /* recalculate checksum */
- sum = ~(udph->check) & 0xffff;
- sum += be16_to_cpu(dhcph->flags);
- while (sum >> 16)
- sum = (sum & 0xffff) + (sum >> 16);
- udph->check = ~sum;
- }
- }
- }
- }
- }
- }
-}
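/*
 * Editorial sketch (not part of the original file): the checksum fix-up in
 * dhcp_flag_bcast() above is an incremental one's-complement update in the
 * spirit of RFC 1624. A hypothetical stand-alone helper for updating a single
 * 16-bit field would look like this:
 */
static u16 csum_update16(u16 old_check, u16 old_val, u16 new_val)
{
	u32 sum = (~old_check & 0xffff) + (~old_val & 0xffff) + new_val;

	/* fold carries back into 16 bits */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);

	return (u16)(~sum & 0xffff);
}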
-
-void *scdb_findEntry(struct adapter *priv, unsigned char *ip_addr)
-{
- unsigned char addr[MAX_NETWORK_ADDR_LEN];
- struct nat25_network_db_entry *db;
- int hash;
-
- __nat25_generate_ipv4_network_addr(addr, (unsigned int *)ip_addr);
- hash = __nat25_network_hash(addr);
- db = priv->nethash[hash];
- while (db) {
- if (!memcmp(db->networkAddr, addr, MAX_NETWORK_ADDR_LEN))
- return (void *)db;
-
- db = db->next_hash;
- }
-
- return NULL;
-}
diff --git a/drivers/staging/r8188eu/core/rtw_cmd.c b/drivers/staging/r8188eu/core/rtw_cmd.c
deleted file mode 100644
index ca9e3d4ee7f4..000000000000
--- a/drivers/staging/r8188eu/core/rtw_cmd.c
+++ /dev/null
@@ -1,1529 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#define _RTW_CMD_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/rtw_br_ext.h"
-#include "../include/rtw_mlme_ext.h"
-#include "../include/rtl8188e_dm.h"
-
-/* The caller and rtw_cmd_thread can protect cmd_q with a spin_lock.
- * No irqsave is necessary.
- */
-
-static void c2h_wk_callback(struct work_struct *work);
-
-void rtw_free_evt_priv(struct evt_priv *pevtpriv)
-{
- cancel_work_sync(&pevtpriv->c2h_wk);
- while (pevtpriv->c2h_wk_alive)
- msleep(10);
-
- while (!rtw_cbuf_empty(pevtpriv->c2h_queue)) {
- void *c2h = rtw_cbuf_pop(pevtpriv->c2h_queue);
- if (c2h && c2h != (void *)pevtpriv)
- kfree(c2h);
- }
-}
-
-int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
-{
- init_completion(&pcmdpriv->enqueue_cmd);
- /* sema_init(&(pcmdpriv->cmd_done_sema), 0); */
- init_completion(&pcmdpriv->start_cmd_thread);
- init_completion(&pcmdpriv->stop_cmd_thread);
-
- rtw_init_queue(&pcmdpriv->cmd_queue);
-
- /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
-
- pcmdpriv->cmd_allocated_buf = kzalloc(MAX_CMDSZ + CMDBUFF_ALIGN_SZ,
- GFP_KERNEL);
-
- if (!pcmdpriv->cmd_allocated_buf)
- return -ENOMEM;
-
- pcmdpriv->cmd_buf = pcmdpriv->cmd_allocated_buf + CMDBUFF_ALIGN_SZ - ((size_t)(pcmdpriv->cmd_allocated_buf) & (CMDBUFF_ALIGN_SZ - 1));
-
- pcmdpriv->rsp_allocated_buf = kzalloc(MAX_RSPSZ + 4, GFP_KERNEL);
-
- if (!pcmdpriv->rsp_allocated_buf) {
- kfree(pcmdpriv->cmd_allocated_buf);
- return -ENOMEM;
- }
-
- pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - ((size_t)(pcmdpriv->rsp_allocated_buf) & 3);
-
- pcmdpriv->cmd_done_cnt = 0;
- pcmdpriv->rsp_cnt = 0;
-
- return 0;
-}
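/*
 * Editorial sketch (not from the driver): the cmd_buf/rsp_buf pointer
 * arithmetic in rtw_init_cmd_priv() above aligns a raw allocation upward
 * inside its padding, assuming the alignment is a power of two.
 */
static void *align_within_pad(void *raw, unsigned long align)
{
	/* e.g. raw = 0x1003, align = 0x200: 0x1003 + 0x200 - 0x003 = 0x1200 */
	return (u8 *)raw + align - ((unsigned long)raw & (align - 1));
}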
-
-int rtw_init_evt_priv(struct evt_priv *pevtpriv)
-{
-	/* initialize the event sequence counter and allocate the c2h event queue */
- atomic_set(&pevtpriv->event_seq, 0);
-
- INIT_WORK(&pevtpriv->c2h_wk, c2h_wk_callback);
- pevtpriv->c2h_wk_alive = false;
- pevtpriv->c2h_queue = rtw_cbuf_alloc(C2H_QUEUE_MAX_LEN + 1);
- if (!pevtpriv->c2h_queue)
- return -ENOMEM;
-
- return 0;
-}
-
-void rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
-{
- if (pcmdpriv) {
- kfree(pcmdpriv->cmd_allocated_buf);
- kfree(pcmdpriv->rsp_allocated_buf);
- }
-}
-
-static int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
-{
- u8 bAllow = false; /* set to true to allow enqueuing cmd when hw_init_completed is false */
-
- if (cmd_obj->cmdcode == GEN_CMD_CODE(_SetChannelPlan))
- bAllow = true;
-
- if ((!pcmdpriv->padapter->hw_init_completed && !bAllow) ||
-	    !pcmdpriv->cmdthd_running) /* cmd_thread not running */
- return _FAIL;
- return _SUCCESS;
-}
-
-u32 rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
-{
- unsigned long flags;
- struct adapter *padapter = pcmdpriv->padapter;
-
- if (!cmd_obj)
- return _FAIL;
-
- cmd_obj->padapter = padapter;
-
- if (rtw_cmd_filter(pcmdpriv, cmd_obj) == _FAIL) {
- rtw_free_cmd_obj(cmd_obj);
- return _FAIL;
- }
-
- spin_lock_irqsave(&pcmdpriv->cmd_queue.lock, flags);
- list_add_tail(&cmd_obj->list, &pcmdpriv->cmd_queue.queue);
- spin_unlock_irqrestore(&pcmdpriv->cmd_queue.lock, flags);
-
- complete(&pcmdpriv->enqueue_cmd);
- return _SUCCESS;
-}
-
-struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv)
-{
- struct cmd_obj *obj;
- struct __queue *queue = &pcmdpriv->cmd_queue;
- unsigned long flags;
-
- spin_lock_irqsave(&queue->lock, flags);
- if (list_empty(&queue->queue)) {
- obj = NULL;
- } else {
- obj = container_of((&queue->queue)->next, struct cmd_obj, list);
- list_del_init(&obj->list);
- }
-
- spin_unlock_irqrestore(&queue->lock, flags);
-
- return obj;
-}
-
-void rtw_free_cmd_obj(struct cmd_obj *pcmd)
-{
-
- if ((pcmd->cmdcode != _JoinBss_CMD_) && (pcmd->cmdcode != _CreateBss_CMD_)) {
- /* free parmbuf in cmd_obj */
- kfree(pcmd->parmbuf);
- }
-
- if (pcmd->rsp) {
- if (pcmd->rspsz != 0) {
- /* free rsp in cmd_obj */
- kfree(pcmd->rsp);
- }
- }
-
- /* free cmd_obj */
- kfree(pcmd);
-
-}
-
-int rtw_cmd_thread(void *context)
-{
- u8 ret;
- struct cmd_obj *pcmd;
- u8 *pcmdbuf;
- u8 (*cmd_hdl)(struct adapter *padapter, u8 *pbuf);
- void (*pcmd_callback)(struct adapter *dev, struct cmd_obj *pcmd);
- struct adapter *padapter = (struct adapter *)context;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
-
- pcmdbuf = pcmdpriv->cmd_buf;
-
- pcmdpriv->cmdthd_running = true;
- complete(&pcmdpriv->start_cmd_thread);
-
- while (1) {
- wait_for_completion(&pcmdpriv->enqueue_cmd);
-
-_next:
- if (padapter->bDriverStopped ||
- padapter->bSurpriseRemoved)
- break;
-
- pcmd = rtw_dequeue_cmd(pcmdpriv);
- if (!pcmd)
- continue;
-
- if (rtw_cmd_filter(pcmdpriv, pcmd) == _FAIL) {
- pcmd->res = H2C_DROPPED;
- goto post_process;
- }
-
- pcmd->cmdsz = round_up(pcmd->cmdsz, 4);
-
- memcpy(pcmdbuf, pcmd->parmbuf, pcmd->cmdsz);
-
- if (pcmd->cmdcode < ARRAY_SIZE(wlancmds)) {
- cmd_hdl = wlancmds[pcmd->cmdcode].h2cfuns;
-
- if (cmd_hdl) {
- ret = cmd_hdl(pcmd->padapter, pcmdbuf);
- pcmd->res = ret;
- }
- } else {
- pcmd->res = H2C_PARAMETERS_ERROR;
- }
-
- cmd_hdl = NULL;
-
-post_process:
-
-		/* call the callback function for post-processing */
- if (pcmd->cmdcode < ARRAY_SIZE(rtw_cmd_callback)) {
- pcmd_callback = rtw_cmd_callback[pcmd->cmdcode].callback;
- if (!pcmd_callback)
- rtw_free_cmd_obj(pcmd);
- else
-				/* todo: fill rsp_buf into pcmd->rsp if (pcmd->rsp != NULL) */
-				pcmd_callback(pcmd->padapter, pcmd); /* note: cmd_obj may be freed inside rtw_cmd_callback */
- } else {
- rtw_free_cmd_obj(pcmd);
- }
-
- flush_signals_thread();
-
- goto _next;
- }
- pcmdpriv->cmdthd_running = false;
-
- /* free all cmd_obj resources */
- do {
- pcmd = rtw_dequeue_cmd(pcmdpriv);
- if (!pcmd)
- break;
-
- rtw_free_cmd_obj(pcmd);
- } while (1);
-
- complete(&pcmdpriv->stop_cmd_thread);
-
- return 0;
-}
-
-/* rtw_sitesurvey_cmd()
- * NOTE: the caller must hold pmlmepriv->lock before calling this function.
- */
-u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, int ssid_num)
-{
- u8 res = _FAIL;
- struct cmd_obj *ph2c;
- struct sitesurvey_parm *psurveyPara;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if (check_fwstate(pmlmepriv, _FW_LINKED))
- rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
-
- if (check_fwstate(pmlmepriv, _FW_LINKED))
- p2p_ps_wk_cmd(padapter, P2P_PS_SCAN, 1);
-
- ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (!ph2c)
- return _FAIL;
-
- psurveyPara = kzalloc(sizeof(*psurveyPara), GFP_ATOMIC);
- if (!psurveyPara) {
- kfree(ph2c);
- return _FAIL;
- }
-
- rtw_free_network_queue(padapter, false);
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara, GEN_CMD_CODE(_SiteSurvey));
-
- /* psurveyPara->bsslimit = 48; */
- psurveyPara->scan_mode = pmlmepriv->scan_mode;
-
- /* prepare ssid list */
- if (ssid) {
- int i;
- for (i = 0; i < ssid_num && i < RTW_SSID_SCAN_AMOUNT; i++) {
- if (ssid[i].SsidLength) {
- memcpy(&psurveyPara->ssid[i], &ssid[i], sizeof(struct ndis_802_11_ssid));
- psurveyPara->ssid_num++;
- }
- }
- }
-
- set_fwstate(pmlmepriv, _FW_UNDER_SURVEY);
-
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-
- if (res == _SUCCESS) {
- pmlmepriv->scan_start_time = jiffies;
-
- _set_timer(&pmlmepriv->scan_to_timer, SCANNING_TIMEOUT);
-
- rtw_led_control(padapter, LED_CTL_SITE_SURVEY);
-
- pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
- } else {
- _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
- }
-
- return res;
-}
-
-int rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset)
-{
- struct cmd_obj *ph2c;
- struct setdatarate_parm *pbsetdataratepara;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
-
- ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (!ph2c)
- return -ENOMEM;
-
- pbsetdataratepara = kzalloc(sizeof(*pbsetdataratepara), GFP_ATOMIC);
- if (!pbsetdataratepara) {
- kfree(ph2c);
- return -ENOMEM;
- }
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, pbsetdataratepara, GEN_CMD_CODE(_SetDataRate));
- pbsetdataratepara->mac_id = 5;
- memcpy(pbsetdataratepara->datarates, rateset, NumRates);
- if (rtw_enqueue_cmd(pcmdpriv, ph2c) == _FAIL)
- return -EPERM;
-
- return 0;
-}
-
-void rtw_getbbrfreg_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
-{
-
-
- kfree(pcmd->parmbuf);
- kfree(pcmd);
-}
-
-u8 rtw_createbss_cmd(struct adapter *padapter)
-{
- struct cmd_obj *pcmd;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- struct wlan_bssid_ex *pdev_network = &padapter->registrypriv.dev_network;
- u8 res = _SUCCESS;
-
- rtw_led_control(padapter, LED_CTL_START_TO_LINK);
-
- pcmd = kzalloc(sizeof(*pcmd), GFP_ATOMIC);
- if (!pcmd) {
- res = _FAIL;
- goto exit;
- }
-
- INIT_LIST_HEAD(&pcmd->list);
- pcmd->cmdcode = _CreateBss_CMD_;
- pcmd->parmbuf = (unsigned char *)pdev_network;
- pcmd->cmdsz = get_wlan_bssid_ex_sz((struct wlan_bssid_ex *)pdev_network);
- pcmd->rsp = NULL;
- pcmd->rspsz = 0;
- pdev_network->Length = pcmd->cmdsz;
- res = rtw_enqueue_cmd(pcmdpriv, pcmd);
-exit:
-
- return res;
-}
-
-u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
-{
- u8 res = _SUCCESS;
- uint t_len = 0;
- struct wlan_bssid_ex *psecnetwork;
- struct cmd_obj *pcmd;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct qos_priv *pqospriv = &pmlmepriv->qospriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct registry_priv *pregistrypriv = &padapter->registrypriv;
- struct ht_priv *phtpriv = &pmlmepriv->htpriv;
- enum ndis_802_11_network_infra ndis_network_mode = pnetwork->network.InfrastructureMode;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- rtw_led_control(padapter, LED_CTL_START_TO_LINK);
-
- pcmd = kzalloc(sizeof(*pcmd), GFP_ATOMIC);
- if (!pcmd) {
- res = _FAIL;
- goto exit;
- }
-	/* the IEs use a fixed-size buffer */
- t_len = sizeof(struct wlan_bssid_ex);
-
-	/* for a hidden AP, set fw_state here */
- if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_ADHOC_STATE)) {
- switch (ndis_network_mode) {
- case Ndis802_11IBSS:
- set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
- break;
- case Ndis802_11Infrastructure:
- set_fwstate(pmlmepriv, WIFI_STATION_STATE);
- break;
- case Ndis802_11APMode:
- case Ndis802_11AutoUnknown:
- case Ndis802_11InfrastructureMax:
- break;
- }
- }
-
- psecnetwork = (struct wlan_bssid_ex *)&psecuritypriv->sec_bss;
- if (!psecnetwork) {
- kfree(pcmd);
- res = _FAIL;
- goto exit;
- }
-
- memset(psecnetwork, 0, t_len);
-
- memcpy(psecnetwork, &pnetwork->network, get_wlan_bssid_ex_sz(&pnetwork->network));
-
- psecuritypriv->authenticator_ie[0] = (unsigned char)psecnetwork->IELength;
-
- if (psecnetwork->IELength - 12 < 255)
- memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], psecnetwork->IELength - 12);
- else
- memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], 255);
-
- psecnetwork->IELength = 0;
-	/* Added by Albert 2009/02/18 */
-	/* If the driver does not connect by bssid, copy the connecting AP's MAC address */
-	/* so that the driver still has the bssid information for PMKIDList searching. */
-
- if (!pmlmepriv->assoc_by_bssid)
- memcpy(&pmlmepriv->assoc_bssid[0], &pnetwork->network.MacAddress[0], ETH_ALEN);
-
- psecnetwork->IELength = rtw_restruct_sec_ie(padapter, &pnetwork->network.IEs[0], &psecnetwork->IEs[0], pnetwork->network.IELength);
-
- pqospriv->qos_option = 0;
-
- if (pregistrypriv->wmm_enable) {
- u32 tmp_len;
-
- tmp_len = rtw_restruct_wmm_ie(padapter, &pnetwork->network.IEs[0], &psecnetwork->IEs[0], pnetwork->network.IELength, psecnetwork->IELength);
-
- if (psecnetwork->IELength != tmp_len) {
- psecnetwork->IELength = tmp_len;
- pqospriv->qos_option = 1; /* There is WMM IE in this corresp. beacon */
- } else {
- pqospriv->qos_option = 0;/* There is no WMM IE in this corresp. beacon */
- }
- }
-
- phtpriv->ht_option = false;
- if (pregistrypriv->ht_enable) {
-		/* Added by Albert 2010/06/23 */
-		/* For WEP mode, use b/g mode for the connection to avoid some IOT issues, */
-		/* especially with the Realtek 8192u SoftAP. */
- if ((padapter->securitypriv.dot11PrivacyAlgrthm != _WEP40_) &&
- (padapter->securitypriv.dot11PrivacyAlgrthm != _WEP104_) &&
- (padapter->securitypriv.dot11PrivacyAlgrthm != _TKIP_)) {
- /* rtw_restructure_ht_ie */
- rtw_restructure_ht_ie(padapter, &pnetwork->network.IEs[0], &psecnetwork->IEs[0],
- pnetwork->network.IELength, &psecnetwork->IELength);
- }
- }
-
- pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pnetwork->network.IEs, pnetwork->network.IELength);
-
- if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_TENDA)
- padapter->pwrctrlpriv.smart_ps = 0;
- else
- padapter->pwrctrlpriv.smart_ps = padapter->registrypriv.smart_ps;
-
- pcmd->cmdsz = get_wlan_bssid_ex_sz(psecnetwork);/* get cmdsz before endian conversion */
-
- INIT_LIST_HEAD(&pcmd->list);
- pcmd->cmdcode = _JoinBss_CMD_;/* GEN_CMD_CODE(_JoinBss) */
- pcmd->parmbuf = (unsigned char *)psecnetwork;
- pcmd->rsp = NULL;
- pcmd->rspsz = 0;
-
- res = rtw_enqueue_cmd(pcmdpriv, pcmd);
-
-exit:
-
- return res;
-}
-
-u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueue) /* for sta_mode */
-{
- struct cmd_obj *cmdobj = NULL;
- struct disconnect_parm *param = NULL;
- struct cmd_priv *cmdpriv = &padapter->cmdpriv;
- u8 res = _SUCCESS;
-
- /* prepare cmd parameter */
- param = kzalloc(sizeof(*param), GFP_ATOMIC);
- if (!param) {
- res = _FAIL;
- goto exit;
- }
- param->deauth_timeout_ms = deauth_timeout_ms;
-
- if (enqueue) {
- /* need enqueue, prepare cmd_obj and enqueue */
- cmdobj = kzalloc(sizeof(*cmdobj), GFP_ATOMIC);
- if (!cmdobj) {
- res = _FAIL;
- kfree(param);
- goto exit;
- }
- init_h2fwcmd_w_parm_no_rsp(cmdobj, param, _DisConnect_CMD_);
- res = rtw_enqueue_cmd(cmdpriv, cmdobj);
- } else {
- /* no need to enqueue, do the cmd hdl directly and free cmd parameter */
- if (disconnect_hdl(padapter, (u8 *)param) != H2C_SUCCESS)
- res = _FAIL;
- kfree(param);
- }
-
-exit:
-
- return res;
-}
-
-u8 rtw_setopmode_cmd(struct adapter *padapter, enum ndis_802_11_network_infra networktype)
-{
- struct cmd_obj *ph2c;
- struct setopmode_parm *psetop;
-
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- u8 res = _SUCCESS;
-
- ph2c = kzalloc(sizeof(*ph2c), GFP_KERNEL);
- if (!ph2c) {
- res = false;
- goto exit;
- }
- psetop = kzalloc(sizeof(*psetop), GFP_KERNEL);
-
- if (!psetop) {
- kfree(ph2c);
- res = false;
- goto exit;
- }
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, psetop, _SetOpMode_CMD_);
- psetop->mode = (u8)networktype;
-
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-
-exit:
-
- return res;
-}
-
-u8 rtw_setstakey_cmd(struct adapter *padapter, u8 *psta, u8 unicast_key)
-{
- struct cmd_obj *ph2c;
- struct set_stakey_parm *psetstakey_para;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- struct set_stakey_rsp *psetstakey_rsp = NULL;
-
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct sta_info *sta = (struct sta_info *)psta;
- u8 res = _SUCCESS;
-
- ph2c = kzalloc(sizeof(*ph2c), GFP_KERNEL);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- psetstakey_para = kzalloc(sizeof(*psetstakey_para), GFP_KERNEL);
- if (!psetstakey_para) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- psetstakey_rsp = kzalloc(sizeof(*psetstakey_rsp), GFP_KERNEL);
- if (!psetstakey_rsp) {
- kfree(ph2c);
- kfree(psetstakey_para);
- res = _FAIL;
- goto exit;
- }
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
- ph2c->rsp = (u8 *)psetstakey_rsp;
- ph2c->rspsz = sizeof(struct set_stakey_rsp);
-
- memcpy(psetstakey_para->addr, sta->hwaddr, ETH_ALEN);
-
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
- psetstakey_para->algorithm = (unsigned char)psecuritypriv->dot11PrivacyAlgrthm;
- else
- GET_ENCRY_ALGO(psecuritypriv, sta, psetstakey_para->algorithm, false);
-
- if (unicast_key)
- memcpy(&psetstakey_para->key, &sta->dot118021x_UncstKey, 16);
- else
- memcpy(&psetstakey_para->key, &psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey, 16);
-
- /* jeff: set this because at least sw key is ready */
- padapter->securitypriv.busetkipkey = true;
-
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-
-exit:
-
- return res;
-}
-
-u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue)
-{
- struct cmd_obj *ph2c;
- struct set_stakey_parm *psetstakey_para;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- struct set_stakey_rsp *psetstakey_rsp = NULL;
- struct sta_info *sta = (struct sta_info *)psta;
- u8 res = _SUCCESS;
-
- if (!enqueue) {
- clear_cam_entry(padapter, entry);
- } else {
- ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- psetstakey_para = kzalloc(sizeof(*psetstakey_para),
- GFP_ATOMIC);
- if (!psetstakey_para) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- psetstakey_rsp = kzalloc(sizeof(*psetstakey_rsp),
- GFP_ATOMIC);
- if (!psetstakey_rsp) {
- kfree(ph2c);
- kfree(psetstakey_para);
- res = _FAIL;
- goto exit;
- }
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
- ph2c->rsp = (u8 *)psetstakey_rsp;
- ph2c->rspsz = sizeof(struct set_stakey_rsp);
-
- memcpy(psetstakey_para->addr, sta->hwaddr, ETH_ALEN);
-
- psetstakey_para->algorithm = _NO_PRIVACY_;
-
- psetstakey_para->id = entry;
-
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
- }
-exit:
-
- return res;
-}
-
-u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
-{
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- struct cmd_obj *ph2c;
- struct addBaReq_parm *paddbareq_parm;
- u8 res = _SUCCESS;
-
- ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- paddbareq_parm = kzalloc(sizeof(*paddbareq_parm), GFP_ATOMIC);
- if (!paddbareq_parm) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- paddbareq_parm->tid = tid;
- memcpy(paddbareq_parm->addr, addr, ETH_ALEN);
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, paddbareq_parm, GEN_CMD_CODE(_AddBAReq));
-
- /* rtw_enqueue_cmd(pcmdpriv, ph2c); */
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-
-exit:
-
- return res;
-}
-
-u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
-{
- struct cmd_obj *ph2c;
- struct drvextra_cmd_parm *pdrvextra_cmd_parm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- u8 res = _SUCCESS;
-
- ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
- if (!pdrvextra_cmd_parm) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm->ec_id = DYNAMIC_CHK_WK_CID;
- pdrvextra_cmd_parm->type_size = 0;
- pdrvextra_cmd_parm->pbuf = (u8 *)padapter;
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
-
- /* rtw_enqueue_cmd(pcmdpriv, ph2c); */
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-exit:
-
- return res;
-}
-
-u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan)
-{
- struct cmd_obj *pcmdobj;
- struct SetChannelPlan_param *setChannelPlan_param;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
-
- u8 res = _SUCCESS;
-
- /* check input parameter */
- if (!rtw_is_channel_plan_valid(chplan)) {
- res = _FAIL;
- goto exit;
- }
-
- /* prepare cmd parameter */
- setChannelPlan_param = kzalloc(sizeof(*setChannelPlan_param),
- GFP_KERNEL);
- if (!setChannelPlan_param) {
- res = _FAIL;
- goto exit;
- }
- setChannelPlan_param->channel_plan = chplan;
-
- /* need enqueue, prepare cmd_obj and enqueue */
- pcmdobj = kzalloc(sizeof(*pcmdobj), GFP_KERNEL);
- if (!pcmdobj) {
- kfree(setChannelPlan_param);
- res = _FAIL;
- goto exit;
- }
-
- init_h2fwcmd_w_parm_no_rsp(pcmdobj, setChannelPlan_param, GEN_CMD_CODE(_SetChannelPlan));
- res = rtw_enqueue_cmd(pcmdpriv, pcmdobj);
-
- /* do something based on res... */
- if (res == _SUCCESS)
- padapter->mlmepriv.ChannelPlan = chplan;
-
-exit:
-
- return res;
-}
-
-static void traffic_status_watchdog(struct adapter *padapter)
-{
- u8 bEnterPS;
- u8 bBusyTraffic = false, bTxBusyTraffic = false, bRxBusyTraffic = false;
- u8 bHigherBusyTraffic = false, bHigherBusyRxTraffic = false, bHigherBusyTxTraffic = false;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
-	/* Determine if our traffic is busy now */
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > 100 ||
- pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > 100) {
- bBusyTraffic = true;
-
- if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > pmlmepriv->LinkDetectInfo.NumTxOkInPeriod)
- bRxBusyTraffic = true;
- else
- bTxBusyTraffic = true;
- }
-
- /* Higher Tx/Rx data. */
- if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > 4000 ||
- pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > 4000) {
- bHigherBusyTraffic = true;
-
- if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > pmlmepriv->LinkDetectInfo.NumTxOkInPeriod)
- bHigherBusyRxTraffic = true;
- else
- bHigherBusyTxTraffic = true;
- }
-
- /* check traffic for powersaving. */
- if (((pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod + pmlmepriv->LinkDetectInfo.NumTxOkInPeriod) > 8) ||
- (pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod > 2))
- bEnterPS = false;
- else
- bEnterPS = true;
-
- /* LeisurePS only work in infra mode. */
- if (bEnterPS)
- LPS_Enter(padapter);
- else
- LPS_Leave(padapter);
- } else {
- LPS_Leave(padapter);
- }
-
- pmlmepriv->LinkDetectInfo.NumRxOkInPeriod = 0;
- pmlmepriv->LinkDetectInfo.NumTxOkInPeriod = 0;
- pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod = 0;
- pmlmepriv->LinkDetectInfo.bBusyTraffic = bBusyTraffic;
- pmlmepriv->LinkDetectInfo.bTxBusyTraffic = bTxBusyTraffic;
- pmlmepriv->LinkDetectInfo.bRxBusyTraffic = bRxBusyTraffic;
- pmlmepriv->LinkDetectInfo.bHigherBusyTraffic = bHigherBusyTraffic;
- pmlmepriv->LinkDetectInfo.bHigherBusyRxTraffic = bHigherBusyRxTraffic;
- pmlmepriv->LinkDetectInfo.bHigherBusyTxTraffic = bHigherBusyTxTraffic;
-}
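/*
 * Editorial sketch (not part of the original file): the power-save decision
 * in traffic_status_watchdog() above reduces to "enter LPS only when unicast
 * rx plus tx activity in the check period is low". A hypothetical predicate:
 */
static bool lps_should_enter(u32 num_rx_unicast_ok, u32 num_tx_ok)
{
	return !(num_rx_unicast_ok + num_tx_ok > 8 || num_rx_unicast_ok > 2);
}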
-
-static void rtl8188e_sreset_xmit_status_check(struct adapter *padapter)
-{
- u32 txdma_status;
- int res;
-
- res = rtw_read32(padapter, REG_TXDMA_STATUS, &txdma_status);
- if (res)
- return;
-
- if (txdma_status != 0x00)
- rtw_write32(padapter, REG_TXDMA_STATUS, txdma_status);
- /* total xmit irp = 4 */
-}
-
-static void dynamic_chk_wk_hdl(struct adapter *padapter, u8 *pbuf)
-{
- struct mlme_priv *pmlmepriv;
-
- padapter = (struct adapter *)pbuf;
- pmlmepriv = &padapter->mlmepriv;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
- expire_timeout_chk(padapter);
-
- rtl8188e_sreset_xmit_status_check(padapter);
-
- linked_status_chk(padapter);
- traffic_status_watchdog(padapter);
-
- rtl8188e_HalDmWatchDog(padapter);
-}
-
-static void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type)
-{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- u8 mstatus;
-
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_STATE))
- return;
-
- switch (lps_ctrl_type) {
- case LPS_CTRL_SCAN:
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- /* connect */
- LPS_Leave(padapter);
- }
- break;
- case LPS_CTRL_JOINBSS:
- LPS_Leave(padapter);
- break;
- case LPS_CTRL_CONNECT:
- mstatus = 1;/* connect */
- /* Reset LPS Setting */
- padapter->pwrctrlpriv.LpsIdleCount = 0;
- rtl8188e_set_FwJoinBssReport_cmd(padapter, mstatus);
- break;
- case LPS_CTRL_DISCONNECT:
- mstatus = 0;/* disconnect */
- LPS_Leave(padapter);
- rtl8188e_set_FwJoinBssReport_cmd(padapter, mstatus);
- break;
- case LPS_CTRL_SPECIAL_PACKET:
- pwrpriv->DelayLPSLastTimeStamp = jiffies;
- LPS_Leave(padapter);
- break;
- case LPS_CTRL_LEAVE:
- LPS_Leave(padapter);
- break;
- default:
- break;
- }
-
-}
-
-u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
-{
- struct cmd_obj *ph2c;
- struct drvextra_cmd_parm *pdrvextra_cmd_parm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- /* struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv; */
- u8 res = _SUCCESS;
-
- /* if (!pwrctrlpriv->bLeisurePs) */
- /* return res; */
-
- if (enqueue) {
- ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm),
- GFP_ATOMIC);
- if (!pdrvextra_cmd_parm) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm->ec_id = LPS_CTRL_WK_CID;
- pdrvextra_cmd_parm->type_size = lps_ctrl_type;
- pdrvextra_cmd_parm->pbuf = NULL;
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
-
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
- } else {
- lps_ctrl_wk_hdl(padapter, lps_ctrl_type);
- }
-
-exit:
-
- return res;
-}
-
-static void rpt_timer_setting_wk_hdl(struct adapter *padapter, u16 min_time)
-{
- struct hal_data_8188e *haldata = &padapter->haldata;
- struct odm_dm_struct *odmpriv = &haldata->odmpriv;
-
- ODM_RA_Set_TxRPT_Time(odmpriv, min_time);
-}
-
-u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
-{
- struct cmd_obj *ph2c;
- struct drvextra_cmd_parm *pdrvextra_cmd_parm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
-
- u8 res = _SUCCESS;
-
- ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm),
- GFP_ATOMIC);
- if (!pdrvextra_cmd_parm) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm->ec_id = RTP_TIMER_CFG_WK_CID;
- pdrvextra_cmd_parm->type_size = min_time;
- pdrvextra_cmd_parm->pbuf = NULL;
- init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-exit:
-
- return res;
-}
-
-static void antenna_select_wk_hdl(struct adapter *padapter, u8 antenna)
-{
- struct hal_data_8188e *haldata = &padapter->haldata;
-
- /* switch current antenna to optimum antenna */
- if (haldata->CurAntenna != antenna) {
- ODM_UpdateRxIdleAnt_88E(&haldata->odmpriv, antenna == 2 ? MAIN_ANT : AUX_ANT);
- haldata->CurAntenna = antenna;
- }
-}
-
-static bool rtw_antenna_diversity(struct adapter *adapter)
-{
- struct hal_data_8188e *haldata = &adapter->haldata;
-
- return haldata->AntDivCfg != 0;
-}
-
-u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue)
-{
- struct cmd_obj *ph2c;
- struct drvextra_cmd_parm *pdrvextra_cmd_parm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- u8 res = _SUCCESS;
-
- if (!rtw_antenna_diversity(padapter))
- return res;
-
- if (enqueue) {
- ph2c = kzalloc(sizeof(*ph2c), GFP_KERNEL);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm),
- GFP_KERNEL);
- if (!pdrvextra_cmd_parm) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm->ec_id = ANT_SELECT_WK_CID;
- pdrvextra_cmd_parm->type_size = antenna;
- pdrvextra_cmd_parm->pbuf = NULL;
- init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
-
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
- } else {
- antenna_select_wk_hdl(padapter, antenna);
- }
-exit:
-
- return res;
-}
-
-u8 p2p_protocol_wk_cmd(struct adapter *padapter, int intCmdType)
-{
- struct cmd_obj *ph2c;
- struct drvextra_cmd_parm *pdrvextra_cmd_parm;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- u8 res = _SUCCESS;
-
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
- return res;
-
- ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
- if (!pdrvextra_cmd_parm) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm->ec_id = P2P_PROTO_WK_CID;
- pdrvextra_cmd_parm->type_size = intCmdType; /* As the command type. */
- pdrvextra_cmd_parm->pbuf = NULL; /* Must be NULL here */
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
-
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-
-exit:
-
- return res;
-}
-
-u8 rtw_ps_cmd(struct adapter *padapter)
-{
- struct cmd_obj *ppscmd;
- struct drvextra_cmd_parm *pdrvextra_cmd_parm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
-
- u8 res = _SUCCESS;
-
- ppscmd = kzalloc(sizeof(*ppscmd), GFP_ATOMIC);
- if (!ppscmd) {
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
- if (!pdrvextra_cmd_parm) {
- kfree(ppscmd);
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm->ec_id = POWER_SAVING_CTRL_WK_CID;
- pdrvextra_cmd_parm->pbuf = NULL;
- init_h2fwcmd_w_parm_no_rsp(ppscmd, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
-
- res = rtw_enqueue_cmd(pcmdpriv, ppscmd);
-
-exit:
-
- return res;
-}
-
-static bool rtw_is_hi_queue_empty(struct adapter *adapter)
-{
- int res;
- u32 reg;
-
- res = rtw_read32(adapter, REG_HGQ_INFORMATION, &reg);
- if (res)
- return false;
-
- return (reg & 0x0000ff00) == 0;
-}
-
-static void rtw_chk_hi_queue_hdl(struct adapter *padapter)
-{
- int cnt = 0;
- struct sta_info *psta_bmc;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- psta_bmc = rtw_get_bcmc_stainfo(padapter);
- if (!psta_bmc)
- return;
-
- if (psta_bmc->sleepq_len == 0) {
- bool val = rtw_is_hi_queue_empty(padapter);
-
- while (!val) {
- msleep(100);
-
- cnt++;
-
- if (cnt > 10)
- break;
-
- val = rtw_is_hi_queue_empty(padapter);
- }
-
- if (cnt <= 10) {
- pstapriv->tim_bitmap &= ~BIT(0);
- pstapriv->sta_dz_bitmap &= ~BIT(0);
-
- update_beacon(padapter, _TIM_IE_, NULL, false);
- } else { /* re check again */
- rtw_chk_hi_queue_cmd(padapter);
- }
- }
-}
-
-void rtw_chk_hi_queue_cmd(struct adapter *padapter)
-{
- struct cmd_obj *ph2c;
- struct drvextra_cmd_parm *pdrvextra_cmd_parm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
-
- ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (!ph2c)
- return;
-
- pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
- if (!pdrvextra_cmd_parm) {
- kfree(ph2c);
- return;
- }
-
- pdrvextra_cmd_parm->ec_id = CHECK_HIQ_WK_CID;
- pdrvextra_cmd_parm->type_size = 0;
- pdrvextra_cmd_parm->pbuf = NULL;
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
-
- rtw_enqueue_cmd(pcmdpriv, ph2c);
-}
-
-u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt)
-{
- struct cmd_obj *ph2c;
- struct drvextra_cmd_parm *pdrvextra_cmd_parm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- u8 res = _SUCCESS;
-
- ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
- if (!pdrvextra_cmd_parm) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm->ec_id = C2H_WK_CID;
- pdrvextra_cmd_parm->type_size = c2h_evt ? 16 : 0;
- pdrvextra_cmd_parm->pbuf = c2h_evt;
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
-
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-
-exit:
-
- return res;
-}
-
-/* C2H event format:
- * Field TRIGGER CONTENT CMD_SEQ CMD_LEN CMD_ID
- * BITS [127:120] [119:16] [15:8] [7:4] [3:0]
- */
-static s32 c2h_evt_read(struct adapter *adapter, u8 *buf)
-{
- s32 ret = _FAIL;
- struct c2h_evt_hdr *c2h_evt;
- int i;
- u8 trigger;
-
- if (!buf)
- goto exit;
-
- ret = rtw_read8(adapter, REG_C2HEVT_CLEAR, &trigger);
- if (ret)
- return _FAIL;
-
- if (trigger == C2H_EVT_HOST_CLOSE)
- goto exit; /* Not ready */
- else if (trigger != C2H_EVT_FW_CLOSE)
- goto clear_evt; /* Not a valid value */
-
- c2h_evt = (struct c2h_evt_hdr *)buf;
-
- memset(c2h_evt, 0, 16);
-
- ret = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL, buf);
- if (ret) {
- ret = _FAIL;
- goto clear_evt;
- }
-
- ret = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL + 1, buf + 1);
- if (ret) {
- ret = _FAIL;
- goto clear_evt;
- }
- /* Read the content */
- for (i = 0; i < c2h_evt->plen; i++) {
- ret = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL +
- sizeof(*c2h_evt) + i, c2h_evt->payload + i);
- if (ret) {
- ret = _FAIL;
- goto clear_evt;
- }
- }
-
- ret = _SUCCESS;
-
-clear_evt:
- /* Clear event to notify FW we have read the command.
-	 * If this field isn't cleared, the FW won't update the next
- * command message.
- */
- rtw_write8(adapter, REG_C2HEVT_CLEAR, C2H_EVT_HOST_CLOSE);
-exit:
- return ret;
-}
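/*
 * Editorial sketch (not from the driver): per the layout comment above,
 * CMD_ID sits in bits [3:0], CMD_LEN in bits [7:4] and CMD_SEQ in bits
 * [15:8] of the C2H message. Assuming byte 0 carries bits [7:0], a
 * hypothetical decode of the two header bytes read above would be:
 */
static void c2h_hdr_decode(const u8 hdr[2], u8 *id, u8 *plen, u8 *seq)
{
	*id = hdr[0] & 0x0f;
	*plen = (hdr[0] >> 4) & 0x0f;
	*seq = hdr[1];
}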
-
-static void c2h_evt_hdl(struct adapter *adapter, struct c2h_evt_hdr *c2h_evt, c2h_id_filter filter)
-{
- u8 buf[16];
-
- if (!c2h_evt)
- c2h_evt_read(adapter, buf);
-}
-
-static void c2h_wk_callback(struct work_struct *work)
-{
- struct evt_priv *evtpriv = container_of(work, struct evt_priv, c2h_wk);
- struct adapter *adapter = container_of(evtpriv, struct adapter, evtpriv);
- struct c2h_evt_hdr *c2h_evt;
-
- evtpriv->c2h_wk_alive = true;
-
- while (!rtw_cbuf_empty(evtpriv->c2h_queue)) {
- c2h_evt = (struct c2h_evt_hdr *)rtw_cbuf_pop(evtpriv->c2h_queue);
- if (c2h_evt) {
- /* This C2H event is read, clear it */
- rtw_write8(adapter, REG_C2HEVT_CLEAR, C2H_EVT_HOST_CLOSE);
- } else {
- c2h_evt = kmalloc(16, GFP_KERNEL);
- if (c2h_evt) {
- /* This C2H event is not read, read & clear now */
- if (c2h_evt_read(adapter, (u8 *)c2h_evt) != _SUCCESS) {
- kfree(c2h_evt);
- continue;
- }
- } else {
- return;
- }
- }
-
- /* Special pointer to trigger c2h_evt_clear only */
- if ((void *)c2h_evt == (void *)evtpriv)
- continue;
-
- if (!c2h_evt_exist(c2h_evt)) {
- kfree(c2h_evt);
- continue;
- }
-
- /* Enqueue into cmd_thread for others */
- rtw_c2h_wk_cmd(adapter, (u8 *)c2h_evt);
- }
-
- evtpriv->c2h_wk_alive = false;
-}
-
-u8 rtw_drvextra_cmd_hdl(struct adapter *padapter, unsigned char *pbuf)
-{
- struct drvextra_cmd_parm *pdrvextra_cmd;
-
- if (!pbuf)
- return H2C_PARAMETERS_ERROR;
-
- pdrvextra_cmd = (struct drvextra_cmd_parm *)pbuf;
-
- switch (pdrvextra_cmd->ec_id) {
- case DYNAMIC_CHK_WK_CID:
- dynamic_chk_wk_hdl(padapter, pdrvextra_cmd->pbuf);
- break;
- case POWER_SAVING_CTRL_WK_CID:
- rtw_ps_processor(padapter);
- break;
- case LPS_CTRL_WK_CID:
- lps_ctrl_wk_hdl(padapter, (u8)pdrvextra_cmd->type_size);
- break;
- case RTP_TIMER_CFG_WK_CID:
- rpt_timer_setting_wk_hdl(padapter, pdrvextra_cmd->type_size);
- break;
- case ANT_SELECT_WK_CID:
- antenna_select_wk_hdl(padapter, pdrvextra_cmd->type_size);
- break;
- case P2P_PS_WK_CID:
- p2p_ps_wk_hdl(padapter, pdrvextra_cmd->type_size);
- break;
- case P2P_PROTO_WK_CID:
- /* Commented by Albert 2011/07/01 */
-		/* type_size carries the P2P command type */
- p2p_protocol_wk_hdl(padapter, pdrvextra_cmd->type_size);
- break;
- case CHECK_HIQ_WK_CID:
- rtw_chk_hi_queue_hdl(padapter);
- break;
- case C2H_WK_CID:
- c2h_evt_hdl(padapter, (struct c2h_evt_hdr *)pdrvextra_cmd->pbuf, NULL);
- break;
- default:
- break;
- }
-
- if (pdrvextra_cmd->pbuf && pdrvextra_cmd->type_size > 0)
- kfree(pdrvextra_cmd->pbuf);
-
- return H2C_SUCCESS;
-}
-
-void rtw_survey_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if (pcmd->res != H2C_SUCCESS) {
- /* TODO: cancel timer and do timeout handler directly... */
- _set_timer(&pmlmepriv->scan_to_timer, 1);
- }
-
- /* free cmd */
- rtw_free_cmd_obj(pcmd);
-
-}
-
-void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if (pcmd->res != H2C_SUCCESS) {
- spin_lock_bh(&pmlmepriv->lock);
- set_fwstate(pmlmepriv, _FW_LINKED);
- spin_unlock_bh(&pmlmepriv->lock);
-
- return;
- }
-
- /* clear bridge database */
- nat25_db_cleanup(padapter);
-
- /* free cmd */
- rtw_free_cmd_obj(pcmd);
-}
-
-void rtw_joinbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if (pcmd->res != H2C_SUCCESS) {
- /* TODO: cancel timer and do timeout handler directly... */
- _set_timer(&pmlmepriv->assoc_timer, 1);
- }
-
- rtw_free_cmd_obj(pcmd);
-}
-
-void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
-{
- struct sta_info *psta = NULL;
- struct wlan_network *pwlan = NULL;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)pcmd->parmbuf;
- struct wlan_network *tgt_network = &pmlmepriv->cur_network;
-
- if (pcmd->res != H2C_SUCCESS)
- _set_timer(&pmlmepriv->assoc_timer, 1);
-
- del_timer_sync(&pmlmepriv->assoc_timer);
-
- spin_lock_bh(&pmlmepriv->lock);
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- psta = rtw_get_stainfo(&padapter->stapriv, pnetwork->MacAddress);
- if (!psta) {
- psta = rtw_alloc_stainfo(&padapter->stapriv, pnetwork->MacAddress);
- if (!psta)
- goto createbss_cmd_fail;
- }
-
- rtw_indicate_connect(padapter);
- } else {
-
- pwlan = rtw_alloc_network(pmlmepriv);
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
- if (!pwlan) {
- pwlan = rtw_get_oldest_wlan_network(&pmlmepriv->scanned_queue);
- if (!pwlan) {
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- goto createbss_cmd_fail;
- }
- pwlan->last_scanned = jiffies;
- } else {
- list_add_tail(&pwlan->list, &pmlmepriv->scanned_queue.queue);
- }
-
- pnetwork->Length = get_wlan_bssid_ex_sz(pnetwork);
- memcpy(&pwlan->network, pnetwork, pnetwork->Length);
-
- memcpy(&tgt_network->network, pnetwork, (get_wlan_bssid_ex_sz(pnetwork)));
-
- _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
-
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
-		/* we will set _FW_LINKED when one more STA joins us (rtw_stassoc_event_callback) */
- }
-
-createbss_cmd_fail:
-
- spin_unlock_bh(&pmlmepriv->lock);
-
- rtw_free_cmd_obj(pcmd);
-
-}
-
-void rtw_setstaKey_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
-{
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct set_stakey_rsp *psetstakey_rsp = (struct set_stakey_rsp *)(pcmd->rsp);
- struct sta_info *psta = rtw_get_stainfo(pstapriv, psetstakey_rsp->addr);
-
- if (!psta)
- goto exit;
-exit:
- rtw_free_cmd_obj(pcmd);
-
-}
-
-void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
-{
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct set_assocsta_parm *passocsta_parm = (struct set_assocsta_parm *)(pcmd->parmbuf);
- struct set_assocsta_rsp *passocsta_rsp = (struct set_assocsta_rsp *)(pcmd->rsp);
- struct sta_info *psta = rtw_get_stainfo(pstapriv, passocsta_parm->addr);
-
- if (!psta)
- goto exit;
-
- psta->aid = passocsta_rsp->cam_id;
- psta->mac_id = passocsta_rsp->cam_id;
-
- spin_lock_bh(&pmlmepriv->lock);
-
- if (check_fwstate(pmlmepriv, WIFI_MP_STATE) && check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
- _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
-
- set_fwstate(pmlmepriv, _FW_LINKED);
- spin_unlock_bh(&pmlmepriv->lock);
-
-exit:
- rtw_free_cmd_obj(pcmd);
-
-}
diff --git a/drivers/staging/r8188eu/core/rtw_efuse.c b/drivers/staging/r8188eu/core/rtw_efuse.c
deleted file mode 100644
index df9534dd25cb..000000000000
--- a/drivers/staging/r8188eu/core/rtw_efuse.c
+++ /dev/null
@@ -1,74 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _RTW_EFUSE_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/rtw_efuse.h"
-#include "../include/rtl8188e_hal.h"
-
-/*
- * Description:
- *	Execute the E-Fuse read byte operation (referred from SD1 Richard).
- *
- * Assumptions:
- *	1. Boot from E-Fuse with successful auto-load.
- *	2. PASSIVE_LEVEL (USB interface)
- *
- * Created by Roger, 2008.10.21.
- */
-void
-ReadEFuseByte(
- struct adapter *Adapter,
- u16 _offset,
- u8 *pbuf)
-{
- u32 value32;
- u8 readbyte;
- u16 retry;
- int res;
-
- /* Write Address */
- rtw_write8(Adapter, EFUSE_CTRL + 1, (_offset & 0xff));
- res = rtw_read8(Adapter, EFUSE_CTRL + 2, &readbyte);
- if (res)
- return;
-
- rtw_write8(Adapter, EFUSE_CTRL + 2, ((_offset >> 8) & 0x03) | (readbyte & 0xfc));
-
- /* Write bit 32 0 */
- res = rtw_read8(Adapter, EFUSE_CTRL + 3, &readbyte);
- if (res)
- return;
-
- rtw_write8(Adapter, EFUSE_CTRL + 3, (readbyte & 0x7f));
-
- /* Check bit 32 read-ready */
- res = rtw_read32(Adapter, EFUSE_CTRL, &value32);
- if (res)
- return;
-
- for (retry = 0; retry < 10000; retry++) {
- res = rtw_read32(Adapter, EFUSE_CTRL, &value32);
- if (res)
- continue;
-
- if (((value32 >> 24) & 0xff) & 0x80)
- break;
- }
-
-	/* 20100205 Joseph: Add delay suggested by SD1 Victor. */
-	/* This fixes Efuse read errors under high temperature conditions. */
-	/* The designer says there must be some delay after the ready bit is set, */
-	/* or the result will always stay on the last data we read. */
- udelay(50);
- res = rtw_read32(Adapter, EFUSE_CTRL, &value32);
- if (res)
- return;
-
- *pbuf = (u8)(value32 & 0xff);
-
- /* FIXME: return an error to caller */
-}
diff --git a/drivers/staging/r8188eu/core/rtw_fw.c b/drivers/staging/r8188eu/core/rtw_fw.c
deleted file mode 100644
index 1e4baf74ecd5..000000000000
--- a/drivers/staging/r8188eu/core/rtw_fw.c
+++ /dev/null
@@ -1,335 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#include <linux/firmware.h>
-#include "../include/rtw_fw.h"
-
-#define MAX_REG_BLOCK_SIZE 196
-#define FW_8188E_START_ADDRESS 0x1000
-#define MAX_PAGE_SIZE 4096
-
-#define IS_FW_HEADER_EXIST(_fwhdr) \
- ((le16_to_cpu(_fwhdr->signature) & 0xFFF0) == 0x92C0 || \
- (le16_to_cpu(_fwhdr->signature) & 0xFFF0) == 0x88C0 || \
- (le16_to_cpu(_fwhdr->signature) & 0xFFF0) == 0x2300 || \
- (le16_to_cpu(_fwhdr->signature) & 0xFFF0) == 0x88E0)
-
-struct rt_firmware_hdr {
- __le16 signature; /* 92C0: test chip; 92C,
- * 88C0: test chip; 88C1: MP A-cut;
- * 92C1: MP A-cut */
- u8 category; /* AP/NIC and USB/PCI */
- u8 function; /* Reserved for different FW function
-			 * indication, for further use when
- * driver needs to download different
- * FW for different conditions */
- __le16 version; /* FW Version */
- u8 subversion; /* FW Subversion, default 0x00 */
- u8 rsvd1;
- u8 month; /* Release time Month field */
- u8 date; /* Release time Date field */
- u8 hour; /* Release time Hour field */
- u8 minute; /* Release time Minute field */
- __le16 ramcodesize; /* The size of RAM code */
- u8 foundry;
- u8 rsvd2;
- __le32 svnidx; /* The SVN entry index */
- __le32 rsvd3;
- __le32 rsvd4;
- __le32 rsvd5;
-};
-
-static_assert(sizeof(struct rt_firmware_hdr) == 32);
-
-static void fw_download_enable(struct adapter *padapter, bool enable)
-{
- u8 tmp;
- int res;
-
- if (enable) {
- /* MCU firmware download enable. */
- res = rtw_read8(padapter, REG_MCUFWDL, &tmp);
- if (res)
- return;
-
- rtw_write8(padapter, REG_MCUFWDL, tmp | 0x01);
-
- /* 8051 reset */
- res = rtw_read8(padapter, REG_MCUFWDL + 2, &tmp);
- if (res)
- return;
-
- rtw_write8(padapter, REG_MCUFWDL + 2, tmp & 0xf7);
- } else {
- /* MCU firmware download disable. */
- res = rtw_read8(padapter, REG_MCUFWDL, &tmp);
- if (res)
- return;
-
- rtw_write8(padapter, REG_MCUFWDL, tmp & 0xfe);
-
- /* Reserved for fw extension. */
- rtw_write8(padapter, REG_MCUFWDL + 1, 0x00);
- }
-}
-
-static int block_write(struct adapter *padapter, u8 *buffer, u32 size)
-{
- int ret = _SUCCESS;
- u32 blocks, block_size, remain;
- u32 i, offset, addr;
- u8 *data;
-
- block_size = MAX_REG_BLOCK_SIZE;
-
- blocks = size / block_size;
- remain = size % block_size;
-
- for (i = 0; i < blocks; i++) {
- addr = FW_8188E_START_ADDRESS + i * block_size;
- data = buffer + i * block_size;
-
- if (rtw_writeN(padapter, addr, block_size, data))
- return _FAIL;
- }
-
- if (remain) {
- offset = blocks * block_size;
- block_size = 8;
-
- blocks = remain / block_size;
- remain = remain % block_size;
-
- for (i = 0; i < blocks; i++) {
- addr = FW_8188E_START_ADDRESS + offset + i * block_size;
- data = buffer + offset + i * block_size;
-
- if (rtw_writeN(padapter, addr, block_size, data))
- return _FAIL;
- }
- }
-
- if (remain) {
- offset += blocks * block_size;
-
- /* block size 1 */
- blocks = remain;
-
- for (i = 0; i < blocks; i++) {
- addr = FW_8188E_START_ADDRESS + offset + i;
- data = buffer + offset + i;
-
- ret = rtw_write8(padapter, addr, *data);
- if (ret == _FAIL)
- goto exit;
- }
- }
-
-exit:
- return ret;
-}
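/*
 * Editorial sketch (not part of the original file): block_write() above
 * splits a chunk into 196-byte writes, then 8-byte writes, then single
 * bytes. With MAX_REG_BLOCK_SIZE == 196, a size of 1000 gives 5 big blocks
 * (980 bytes), 2 eight-byte blocks (16 bytes) and 4 single-byte writes.
 */
static void fw_split_sizes(u32 size, u32 *big, u32 *mid, u32 *single)
{
	*big = size / 196;
	*mid = (size % 196) / 8;
	*single = (size % 196) % 8;
}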
-
-static int page_write(struct adapter *padapter, u32 page, u8 *buffer, u32 size)
-{
- u8 value8;
- u8 u8Page = (u8)(page & 0x07);
- int res;
-
- res = rtw_read8(padapter, REG_MCUFWDL + 2, &value8);
- if (res)
- return _FAIL;
-
- value8 = (value8 & 0xF8) | u8Page;
- rtw_write8(padapter, REG_MCUFWDL + 2, value8);
-
- return block_write(padapter, buffer, size);
-}
-
-static int write_fw(struct adapter *padapter, u8 *buffer, u32 size)
-{
-	/* Since we need to decide the firmware download method dynamically, we call this function to get the chip version. */
- /* We can remove _ReadChipVersion from ReadpadapterInfo8192C later. */
- int ret = _SUCCESS;
- u32 pageNums, remainSize;
- u32 page, offset;
-
- pageNums = size / MAX_PAGE_SIZE;
- remainSize = size % MAX_PAGE_SIZE;
-
- for (page = 0; page < pageNums; page++) {
- offset = page * MAX_PAGE_SIZE;
- ret = page_write(padapter, page, buffer + offset, MAX_PAGE_SIZE);
-
- if (ret == _FAIL)
- goto exit;
- }
- if (remainSize) {
- offset = pageNums * MAX_PAGE_SIZE;
- page = pageNums;
- ret = page_write(padapter, page, buffer + offset, remainSize);
-
- if (ret == _FAIL)
- goto exit;
- }
-exit:
- return ret;
-}
-
-void rtw_reset_8051(struct adapter *padapter)
-{
- u8 val8;
- int res;
-
- res = rtw_read8(padapter, REG_SYS_FUNC_EN + 1, &val8);
- if (res)
- return;
-
- rtw_write8(padapter, REG_SYS_FUNC_EN + 1, val8 & (~BIT(2)));
- rtw_write8(padapter, REG_SYS_FUNC_EN + 1, val8 | (BIT(2)));
-}
-
-static int fw_free_to_go(struct adapter *padapter)
-{
- u32 counter = 0;
- u32 value32;
- int res;
-
- /* polling CheckSum report */
- do {
- res = rtw_read32(padapter, REG_MCUFWDL, &value32);
- if (res)
- continue;
-
- if (value32 & FWDL_CHKSUM_RPT)
- break;
- } while (counter++ < POLLING_READY_TIMEOUT_COUNT);
-
- if (counter >= POLLING_READY_TIMEOUT_COUNT)
- return _FAIL;
-
- res = rtw_read32(padapter, REG_MCUFWDL, &value32);
- if (res)
- return _FAIL;
-
- value32 |= MCUFWDL_RDY;
- value32 &= ~WINTINI_RDY;
- rtw_write32(padapter, REG_MCUFWDL, value32);
-
- rtw_reset_8051(padapter);
-
- /* polling for FW ready */
- counter = 0;
- do {
- res = rtw_read32(padapter, REG_MCUFWDL, &value32);
- if (!res && value32 & WINTINI_RDY)
- return _SUCCESS;
-
- udelay(5);
- } while (counter++ < POLLING_READY_TIMEOUT_COUNT);
-
- return _FAIL;
-}
-
-static int load_firmware(struct rt_firmware *rtfw, struct device *device)
-{
- int ret = _SUCCESS;
- const struct firmware *fw;
- const char *fw_name = FW_RTL8188EU;
- int err = request_firmware(&fw, fw_name, device);
-
- if (err) {
- pr_err("Request firmware failed with error 0x%x\n", err);
- ret = _FAIL;
- goto exit;
- }
- if (!fw) {
- pr_err("Firmware %s not available\n", fw_name);
- ret = _FAIL;
- goto exit;
- }
-
- rtfw->data = kmemdup(fw->data, fw->size, GFP_KERNEL);
- if (!rtfw->data) {
- pr_err("Failed to allocate rtfw->data\n");
- ret = _FAIL;
- goto exit;
- }
- rtfw->size = fw->size;
-
-exit:
- release_firmware(fw);
- return ret;
-}
-
-int rtl8188e_firmware_download(struct adapter *padapter)
-{
- int ret = _SUCCESS;
- u8 reg;
- unsigned long fwdl_timeout;
- struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
- struct device *device = dvobj_to_dev(dvobj);
- struct rt_firmware_hdr *fwhdr = NULL;
- u8 *fw_data;
- u32 fw_size;
-
- if (!dvobj->firmware.data)
- ret = load_firmware(&dvobj->firmware, device);
- if (ret == _FAIL) {
- dvobj->firmware.data = NULL;
- goto exit;
- }
- fw_data = dvobj->firmware.data;
- fw_size = dvobj->firmware.size;
-
- fwhdr = (struct rt_firmware_hdr *)dvobj->firmware.data;
-
- if (IS_FW_HEADER_EXIST(fwhdr)) {
- dev_info_once(device, "Firmware Version %d, SubVersion %d, Signature 0x%x\n",
- le16_to_cpu(fwhdr->version), fwhdr->subversion,
- le16_to_cpu(fwhdr->signature));
-
- fw_data = fw_data + sizeof(struct rt_firmware_hdr);
- fw_size = fw_size - sizeof(struct rt_firmware_hdr);
- }
-
-	/* Suggested by Filen. If the 8051 is running RAM code, the driver should tell the FW to reset itself, */
-	/* or the FW download will fail. 2010.02.01. by tynli. */
- ret = rtw_read8(padapter, REG_MCUFWDL, &reg);
- if (ret) {
- ret = _FAIL;
- goto exit;
- }
-
- if (reg & RAM_DL_SEL) { /* 8051 RAM code */
- rtw_write8(padapter, REG_MCUFWDL, 0x00);
- rtw_reset_8051(padapter);
- }
-
- fw_download_enable(padapter, true);
- fwdl_timeout = jiffies + msecs_to_jiffies(500);
- do {
- /* reset the FWDL chksum */
- ret = rtw_read8(padapter, REG_MCUFWDL, &reg);
- if (ret) {
- ret = _FAIL;
- continue;
- }
-
- rtw_write8(padapter, REG_MCUFWDL, reg | FWDL_CHKSUM_RPT);
-
- ret = write_fw(padapter, fw_data, fw_size);
- if (ret == _SUCCESS)
- break;
- } while (!time_after(jiffies, fwdl_timeout));
-
- fw_download_enable(padapter, false);
- if (ret != _SUCCESS)
- goto exit;
-
- ret = fw_free_to_go(padapter);
- if (ret != _SUCCESS)
- goto exit;
-
-exit:
- return ret;
-}
diff --git a/drivers/staging/r8188eu/core/rtw_ieee80211.c b/drivers/staging/r8188eu/core/rtw_ieee80211.c
deleted file mode 100644
index bc8543ea2e66..000000000000
--- a/drivers/staging/r8188eu/core/rtw_ieee80211.c
+++ /dev/null
@@ -1,1150 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _IEEE80211_C
-
-#include "../include/drv_types.h"
-#include "../include/ieee80211.h"
-#include "../include/wifi.h"
-#include "../include/osdep_service.h"
-#include "../include/wlan_bssdef.h"
-#include "../include/usb_osintf.h"
-
-u8 RTW_WPA_OUI_TYPE[] = { 0x00, 0x50, 0xf2, 1 };
-u16 RTW_WPA_VERSION = 1;
-u8 WPA_AUTH_KEY_MGMT_NONE[] = { 0x00, 0x50, 0xf2, 0 };
-u8 WPA_AUTH_KEY_MGMT_UNSPEC_802_1X[] = { 0x00, 0x50, 0xf2, 1 };
-u8 WPA_AUTH_KEY_MGMT_PSK_OVER_802_1X[] = { 0x00, 0x50, 0xf2, 2 };
-u8 WPA_CIPHER_SUITE_NONE[] = { 0x00, 0x50, 0xf2, 0 };
-u8 WPA_CIPHER_SUITE_WEP40[] = { 0x00, 0x50, 0xf2, 1 };
-u8 WPA_CIPHER_SUITE_TKIP[] = { 0x00, 0x50, 0xf2, 2 };
-u8 WPA_CIPHER_SUITE_WRAP[] = { 0x00, 0x50, 0xf2, 3 };
-u8 WPA_CIPHER_SUITE_CCMP[] = { 0x00, 0x50, 0xf2, 4 };
-u8 WPA_CIPHER_SUITE_WEP104[] = { 0x00, 0x50, 0xf2, 5 };
-
-u16 RSN_VERSION_BSD = 1;
-u8 RSN_AUTH_KEY_MGMT_UNSPEC_802_1X[] = { 0x00, 0x0f, 0xac, 1 };
-u8 RSN_AUTH_KEY_MGMT_PSK_OVER_802_1X[] = { 0x00, 0x0f, 0xac, 2 };
-u8 RSN_CIPHER_SUITE_NONE[] = { 0x00, 0x0f, 0xac, 0 };
-u8 RSN_CIPHER_SUITE_WEP40[] = { 0x00, 0x0f, 0xac, 1 };
-u8 RSN_CIPHER_SUITE_TKIP[] = { 0x00, 0x0f, 0xac, 2 };
-u8 RSN_CIPHER_SUITE_WRAP[] = { 0x00, 0x0f, 0xac, 3 };
-u8 RSN_CIPHER_SUITE_CCMP[] = { 0x00, 0x0f, 0xac, 4 };
-u8 RSN_CIPHER_SUITE_WEP104[] = { 0x00, 0x0f, 0xac, 5 };
-/* */
-/* for adhoc-master to generate ie and provide supported-rate to fw */
-/* */
-
-static u8 WIFI_CCKRATES[] = {
- (IEEE80211_CCK_RATE_1MB | IEEE80211_BASIC_RATE_MASK),
- (IEEE80211_CCK_RATE_2MB | IEEE80211_BASIC_RATE_MASK),
- (IEEE80211_CCK_RATE_5MB | IEEE80211_BASIC_RATE_MASK),
- (IEEE80211_CCK_RATE_11MB | IEEE80211_BASIC_RATE_MASK)
- };
-
-static u8 WIFI_OFDMRATES[] = {
- (IEEE80211_OFDM_RATE_6MB),
- (IEEE80211_OFDM_RATE_9MB),
- (IEEE80211_OFDM_RATE_12MB),
- (IEEE80211_OFDM_RATE_18MB),
- (IEEE80211_OFDM_RATE_24MB),
- IEEE80211_OFDM_RATE_36MB,
- IEEE80211_OFDM_RATE_48MB,
- IEEE80211_OFDM_RATE_54MB
- };
-
-int rtw_get_bit_value_from_ieee_value(u8 val)
-{
- unsigned char dot11_rate_table[] = {
- 2, 4, 11, 22, 12, 18, 24, 36, 48,
- 72, 96, 108, 0}; /* last element must be zero!! */
-
- int i = 0;
- while (dot11_rate_table[i] != 0) {
- if (dot11_rate_table[i] == val)
- return BIT(i);
- i++;
- }
- return 0;
-}
-
-bool rtw_is_cckrates_included(u8 *rate)
-{
- u32 i = 0;
-
- while (rate[i] != 0) {
- if ((((rate[i]) & 0x7f) == 2) || (((rate[i]) & 0x7f) == 4) ||
- (((rate[i]) & 0x7f) == 11) || (((rate[i]) & 0x7f) == 22))
- return true;
- i++;
- }
- return false;
-}
-
-bool rtw_is_cckratesonly_included(u8 *rate)
-{
- u32 i = 0;
-
- while (rate[i] != 0) {
- if ((((rate[i]) & 0x7f) != 2) && (((rate[i]) & 0x7f) != 4) &&
- (((rate[i]) & 0x7f) != 11) && (((rate[i]) & 0x7f) != 22))
- return false;
- i++;
- }
-
- return true;
-}
-
-int rtw_check_network_type(unsigned char *rate, int ratelen, int channel)
-{
- if (channel > 14)
- return WIRELESS_INVALID;
- /* could be pure B, pure G, or B/G */
- if (rtw_is_cckratesonly_included(rate))
- return WIRELESS_11B;
- else if (rtw_is_cckrates_included(rate))
- return WIRELESS_11BG;
- else
- return WIRELESS_11G;
-}
-
-u8 *rtw_set_fixed_ie(unsigned char *pbuf, unsigned int len, unsigned char *source,
- unsigned int *frlen)
-{
- memcpy((void *)pbuf, (void *)source, len);
- *frlen = *frlen + len;
- return pbuf + len;
-}
-
-/* rtw_set_ie will update frame length */
-u8 *rtw_set_ie
-(
- u8 *pbuf,
- int index,
- uint len,
- u8 *source,
- uint *frlen /* frame length */
-)
-{
-
- *pbuf = (u8)index;
-
- *(pbuf + 1) = (u8)len;
-
- if (len > 0)
- memcpy((void *)(pbuf + 2), (void *)source, len);
-
- *frlen = *frlen + (len + 2);
-
- return pbuf + len + 2;
-}
-
-/*----------------------------------------------------------------------------
-index: the information element ID to search for; limit: the maximum number of bytes to search
------------------------------------------------------------------------------*/
-u8 *rtw_get_ie(u8 *pbuf, int index, int *len, int limit)
-{
- int tmp, i;
- u8 *p;
-
- if (limit < 1) {
-
- return NULL;
- }
-
- p = pbuf;
- i = 0;
- *len = 0;
- while (1) {
- if (*p == index) {
- *len = *(p + 1);
- return p;
- }
- tmp = *(p + 1);
- p += (tmp + 2);
- i += (tmp + 2);
- if (i >= limit)
- break;
- }
-
- return NULL;
-}
-
-void rtw_set_supported_rate(u8 *SupportedRates, uint mode)
-{
-
- memset(SupportedRates, 0, NDIS_802_11_LENGTH_RATES_EX);
-
- switch (mode) {
- case WIRELESS_11B:
- memcpy(SupportedRates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
- break;
- case WIRELESS_11G:
- memcpy(SupportedRates, WIFI_OFDMRATES, IEEE80211_NUM_OFDM_RATESLEN);
- break;
- case WIRELESS_11BG:
- case WIRELESS_11G_24N:
- case WIRELESS_11_24N:
- case WIRELESS_11BG_24N:
- memcpy(SupportedRates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
- memcpy(SupportedRates + IEEE80211_CCK_RATE_LEN, WIFI_OFDMRATES, IEEE80211_NUM_OFDM_RATESLEN);
- break;
- }
-
-}
-
-uint rtw_get_rateset_len(u8 *rateset)
-{
- uint i = 0;
-
- while (1) {
- if ((rateset[i]) == 0)
- break;
- if (i > 12)
- break;
- i++;
- }
-
- return i;
-}
-
-int rtw_generate_ie(struct registry_priv *pregistrypriv)
-{
- u8 wireless_mode;
- int sz = 0, rateLen;
- struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
- u8 *ie = pdev_network->IEs;
-
- /* timestamp will be inserted by hardware */
- sz += 8;
- ie += sz;
-
- /* beacon interval : 2bytes */
- *(__le16 *)ie = cpu_to_le16((u16)pdev_network->Configuration.BeaconPeriod);/* BCN_INTERVAL; */
- sz += 2;
- ie += 2;
-
- /* capability info */
- *(u16 *)ie = 0;
-
- *(__le16 *)ie |= cpu_to_le16(cap_IBSS);
-
- if (pregistrypriv->preamble == PREAMBLE_SHORT)
- *(__le16 *)ie |= cpu_to_le16(cap_ShortPremble);
-
- if (pdev_network->Privacy)
- *(__le16 *)ie |= cpu_to_le16(cap_Privacy);
-
- sz += 2;
- ie += 2;
-
- /* SSID */
- ie = rtw_set_ie(ie, _SSID_IE_, pdev_network->Ssid.SsidLength, pdev_network->Ssid.Ssid, &sz);
-
- /* supported rates */
- wireless_mode = pregistrypriv->wireless_mode;
-
- rtw_set_supported_rate(pdev_network->SupportedRates, wireless_mode);
-
- rateLen = rtw_get_rateset_len(pdev_network->SupportedRates);
-
- if (rateLen > 8) {
- ie = rtw_set_ie(ie, _SUPPORTEDRATES_IE_, 8, pdev_network->SupportedRates, &sz);
- /* ie = rtw_set_ie(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8), (pdev_network->SupportedRates + 8), &sz); */
- } else {
- ie = rtw_set_ie(ie, _SUPPORTEDRATES_IE_, rateLen, pdev_network->SupportedRates, &sz);
- }
-
- /* DS parameter set */
- ie = rtw_set_ie(ie, _DSSET_IE_, 1, (u8 *)&pdev_network->Configuration.DSConfig, &sz);
-
- /* IBSS Parameter Set */
-
- ie = rtw_set_ie(ie, _IBSS_PARA_IE_, 2, (u8 *)&pdev_network->Configuration.ATIMWindow, &sz);
-
- if (rateLen > 8)
- ie = rtw_set_ie(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8), (pdev_network->SupportedRates + 8), &sz);
-
- return sz;
-}
-
-unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit)
-{
- int len;
- u16 val16;
- __le16 le_tmp;
- unsigned char wpa_oui_type[] = {0x00, 0x50, 0xf2, 0x01};
- u8 *pbuf = pie;
- int limit_new = limit;
-
- while (1) {
- pbuf = rtw_get_ie(pbuf, _WPA_IE_ID_, &len, limit_new);
-
- if (pbuf) {
- /* check if oui matches... */
- if (memcmp((pbuf + 2), wpa_oui_type, sizeof(wpa_oui_type)))
- goto check_next_ie;
-
- /* check version... */
- memcpy((u8 *)&le_tmp, (pbuf + 6), sizeof(val16));
-
- val16 = le16_to_cpu(le_tmp);
- if (val16 != 0x0001)
- goto check_next_ie;
- *wpa_ie_len = *(pbuf + 1);
- return pbuf;
- }
- *wpa_ie_len = 0;
- return NULL;
-
-check_next_ie:
- limit_new = limit - (pbuf - pie) - 2 - len;
- if (limit_new <= 0)
- break;
- pbuf += (2 + len);
- }
- *wpa_ie_len = 0;
- return NULL;
-}
-
-unsigned char *rtw_get_wpa2_ie(unsigned char *pie, int *rsn_ie_len, int limit)
-{
-
- return rtw_get_ie(pie, _WPA2_IE_ID_, rsn_ie_len, limit);
-}
-
-int rtw_get_wpa_cipher_suite(u8 *s)
-{
- if (!memcmp(s, WPA_CIPHER_SUITE_NONE, WPA_SELECTOR_LEN))
- return WPA_CIPHER_NONE;
- if (!memcmp(s, WPA_CIPHER_SUITE_WEP40, WPA_SELECTOR_LEN))
- return WPA_CIPHER_WEP40;
- if (!memcmp(s, WPA_CIPHER_SUITE_TKIP, WPA_SELECTOR_LEN))
- return WPA_CIPHER_TKIP;
- if (!memcmp(s, WPA_CIPHER_SUITE_CCMP, WPA_SELECTOR_LEN))
- return WPA_CIPHER_CCMP;
- if (!memcmp(s, WPA_CIPHER_SUITE_WEP104, WPA_SELECTOR_LEN))
- return WPA_CIPHER_WEP104;
-
- return 0;
-}
-
-int rtw_get_wpa2_cipher_suite(u8 *s)
-{
- if (!memcmp(s, RSN_CIPHER_SUITE_NONE, RSN_SELECTOR_LEN))
- return WPA_CIPHER_NONE;
- if (!memcmp(s, RSN_CIPHER_SUITE_WEP40, RSN_SELECTOR_LEN))
- return WPA_CIPHER_WEP40;
- if (!memcmp(s, RSN_CIPHER_SUITE_TKIP, RSN_SELECTOR_LEN))
- return WPA_CIPHER_TKIP;
- if (!memcmp(s, RSN_CIPHER_SUITE_CCMP, RSN_SELECTOR_LEN))
- return WPA_CIPHER_CCMP;
- if (!memcmp(s, RSN_CIPHER_SUITE_WEP104, RSN_SELECTOR_LEN))
- return WPA_CIPHER_WEP104;
-
- return 0;
-}
-
-int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwise_cipher, int *is_8021x)
-{
- int i, ret = _SUCCESS;
- int left, count;
- u8 *pos;
- u8 SUITE_1X[4] = {0x00, 0x50, 0xf2, 1};
-
- if (wpa_ie_len <= 0) {
- /* No WPA IE - fail silently */
- return _FAIL;
- }
-
- if ((*wpa_ie != _WPA_IE_ID_) || (*(wpa_ie + 1) != (u8)(wpa_ie_len - 2)) ||
- (memcmp(wpa_ie + 2, RTW_WPA_OUI_TYPE, WPA_SELECTOR_LEN)))
- return _FAIL;
-
- pos = wpa_ie;
-
- pos += 8;
- left = wpa_ie_len - 8;
-
- /* group_cipher */
- if (left >= WPA_SELECTOR_LEN) {
- *group_cipher = rtw_get_wpa_cipher_suite(pos);
- pos += WPA_SELECTOR_LEN;
- left -= WPA_SELECTOR_LEN;
- } else if (left > 0) {
- return _FAIL;
- }
-
- /* pairwise_cipher */
- if (left >= 2) {
- count = get_unaligned_le16(pos);
- pos += 2;
- left -= 2;
-
- if (count == 0 || left < count * WPA_SELECTOR_LEN)
- return _FAIL;
-
- for (i = 0; i < count; i++) {
- *pairwise_cipher |= rtw_get_wpa_cipher_suite(pos);
-
- pos += WPA_SELECTOR_LEN;
- left -= WPA_SELECTOR_LEN;
- }
- } else if (left == 1) {
- return _FAIL;
- }
-
- if (is_8021x) {
- if (left >= 6) {
- pos += 2;
- if (!memcmp(pos, SUITE_1X, 4))
- *is_8021x = 1;
- }
- }
-
- return ret;
-}
-
-int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwise_cipher, int *is_8021x)
-{
- int i, ret = _SUCCESS;
- int left, count;
- u8 *pos;
- u8 SUITE_1X[4] = {0x00, 0x0f, 0xac, 0x01};
-
- if (rsn_ie_len <= 0) {
- /* No RSN IE - fail silently */
- return _FAIL;
- }
-
- if ((*rsn_ie != _WPA2_IE_ID_) || (*(rsn_ie + 1) != (u8)(rsn_ie_len - 2)))
- return _FAIL;
-
- pos = rsn_ie;
- pos += 4;
- left = rsn_ie_len - 4;
-
- /* group_cipher */
- if (left >= RSN_SELECTOR_LEN) {
- *group_cipher = rtw_get_wpa2_cipher_suite(pos);
-
- pos += RSN_SELECTOR_LEN;
- left -= RSN_SELECTOR_LEN;
-
- } else if (left > 0) {
- return _FAIL;
- }
-
- /* pairwise_cipher */
- if (left >= 2) {
- count = get_unaligned_le16(pos);
- pos += 2;
- left -= 2;
-
- if (count == 0 || left < count * RSN_SELECTOR_LEN)
- return _FAIL;
-
- for (i = 0; i < count; i++) {
- *pairwise_cipher |= rtw_get_wpa2_cipher_suite(pos);
-
- pos += RSN_SELECTOR_LEN;
- left -= RSN_SELECTOR_LEN;
- }
-
- } else if (left == 1) {
- return _FAIL;
- }
-
- if (is_8021x) {
- if (left >= 6) {
- pos += 2;
- if (!memcmp(pos, SUITE_1X, 4))
- *is_8021x = 1;
- }
- }
- return ret;
-}
-
-int rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie, u16 *wpa_len)
-{
- u8 authmode;
- u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01};
- uint cnt;
-
- /* Search required WPA or WPA2 IE and copy to sec_ie[] */
-
- cnt = (_TIMESTAMP_ + _BEACON_ITERVAL_ + _CAPABILITY_);
-
- while (cnt < in_len) {
- authmode = in_ie[cnt];
-
- if ((authmode == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt + 2], &wpa_oui[0], 4))) {
- if (wpa_ie)
- memcpy(wpa_ie, &in_ie[cnt], in_ie[cnt + 1] + 2);
-
- *wpa_len = in_ie[cnt + 1] + 2;
- cnt += in_ie[cnt + 1] + 2; /* get next */
- } else {
- if (authmode == _WPA2_IE_ID_) {
- if (rsn_ie)
- memcpy(rsn_ie, &in_ie[cnt], in_ie[cnt + 1] + 2);
-
- *rsn_len = in_ie[cnt + 1] + 2;
- cnt += in_ie[cnt + 1] + 2; /* get next */
- } else {
- cnt += in_ie[cnt + 1] + 2; /* get next */
- }
- }
- }
-
- return *rsn_len + *wpa_len;
-}
-
-u8 rtw_is_wps_ie(u8 *ie_ptr, uint *wps_ielen)
-{
- u8 match = false;
- u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
-
- if (!ie_ptr)
- return match;
-
- eid = ie_ptr[0];
-
- if ((eid == _WPA_IE_ID_) && (!memcmp(&ie_ptr[2], wps_oui, 4))) {
- *wps_ielen = ie_ptr[1] + 2;
- match = true;
- }
- return match;
-}
-
-/**
- * rtw_get_wps_ie - Search WPS IE from a series of IEs
- * @in_ie: Address of IEs to search
- * @in_len: Length limit from in_ie
- * @wps_ie: If not NULL and WPS IE is found, WPS IE will be copied to the buf starting from wps_ie
- * @wps_ielen: If not NULL and WPS IE is found, will set to the length of the entire WPS IE
- *
- * Returns: The address of the WPS IE found, or NULL
- */
-u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen)
-{
- uint cnt;
- u8 *wpsie_ptr = NULL;
- u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
-
- if (wps_ielen)
- *wps_ielen = 0;
-
- if (!in_ie || in_len <= 0)
- return wpsie_ptr;
-
- cnt = 0;
-
- while (cnt < in_len) {
- eid = in_ie[cnt];
-
- if ((eid == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt + 2], wps_oui, 4))) {
- wpsie_ptr = &in_ie[cnt];
-
- if (wps_ie)
- memcpy(wps_ie, &in_ie[cnt], in_ie[cnt + 1] + 2);
-
- if (wps_ielen)
- *wps_ielen = in_ie[cnt + 1] + 2;
-
- cnt += in_ie[cnt + 1] + 2;
-
- break;
- }
- cnt += in_ie[cnt + 1] + 2; /* goto next */
- }
- return wpsie_ptr;
-}
-
-/**
- * rtw_get_wps_attr - Search a specific WPS attribute from a given WPS IE
- * @wps_ie: Address of WPS IE to search
- * @wps_ielen: Length limit from wps_ie
- * @target_attr_id: The attribute ID of WPS attribute to search
- * @buf_attr: If not NULL and the WPS attribute is found, WPS attribute will be copied to the buf starting from buf_attr
- * @len_attr: If not NULL and the WPS attribute is found, will set to the length of the entire WPS attribute
- *
- * Returns: the address of the specific WPS attribute found, or NULL
- */
-u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_attr, u32 *len_attr)
-{
- u8 *attr_ptr = NULL;
- u8 *target_attr_ptr = NULL;
- u8 wps_oui[4] = {0x00, 0x50, 0xF2, 0x04};
-
- if (len_attr)
- *len_attr = 0;
-
- if ((wps_ie[0] != _VENDOR_SPECIFIC_IE_) ||
- (memcmp(wps_ie + 2, wps_oui, 4)))
- return attr_ptr;
-
- /* 6 = 1(Element ID) + 1(Length) + 4(WPS OUI) */
- attr_ptr = wps_ie + 6; /* goto first attr */
-
- while (attr_ptr - wps_ie < wps_ielen) {
- /* 4 = 2(Attribute ID) + 2(Length) */
- u16 attr_id = RTW_GET_BE16(attr_ptr);
- u16 attr_data_len = RTW_GET_BE16(attr_ptr + 2);
- u16 attr_len = attr_data_len + 4;
-
- if (attr_id == target_attr_id) {
- target_attr_ptr = attr_ptr;
- if (buf_attr)
- memcpy(buf_attr, attr_ptr, attr_len);
- if (len_attr)
- *len_attr = attr_len;
- break;
- }
- attr_ptr += attr_len; /* goto next */
- }
- return target_attr_ptr;
-}
-
-/**
- * rtw_get_wps_attr_content - Search a specific WPS attribute content from a given WPS IE
- * @wps_ie: Address of WPS IE to search
- * @wps_ielen: Length limit from wps_ie
- * @target_attr_id: The attribute ID of WPS attribute to search
- * @buf_content: If not NULL and the WPS attribute is found, WPS attribute content will be copied to the buf starting from buf_content
- * @len_content: If not NULL and the WPS attribute is found, will set to the length of the WPS attribute content
- *
- * Returns: the address of the specific WPS attribute content found, or NULL
- */
-u8 *rtw_get_wps_attr_content(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_content, uint *len_content)
-{
- u8 *attr_ptr;
- u32 attr_len;
-
- if (len_content)
- *len_content = 0;
-
- attr_ptr = rtw_get_wps_attr(wps_ie, wps_ielen, target_attr_id, NULL, &attr_len);
-
- if (attr_ptr && attr_len) {
- if (buf_content)
- memcpy(buf_content, attr_ptr + 4, attr_len - 4);
-
- if (len_content)
- *len_content = attr_len - 4;
-
- return attr_ptr + 4;
- }
-
- return NULL;
-}
-
-static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
- struct rtw_ieee802_11_elems *elems,
- int show_errors)
-{
- unsigned int oui;
-
- /* first 3 bytes in vendor specific information element are the IEEE
-	 * OUI of the vendor. The following byte is used as a vendor-specific
- * sub-type. */
- if (elen < 4)
- return -1;
-
- oui = RTW_GET_BE24(pos);
- switch (oui) {
- case OUI_MICROSOFT:
- /* Microsoft/Wi-Fi information elements are further typed and
- * subtyped */
- switch (pos[3]) {
- case 1:
- /* Microsoft OUI (00:50:F2) with OUI Type 1:
- * real WPA information element */
- elems->wpa_ie = pos;
- elems->wpa_ie_len = elen;
- break;
- case WME_OUI_TYPE: /* this is a Wi-Fi WME info. element */
- if (elen < 5)
- return -1;
- switch (pos[4]) {
- case WME_OUI_SUBTYPE_INFORMATION_ELEMENT:
- case WME_OUI_SUBTYPE_PARAMETER_ELEMENT:
- elems->wme = pos;
- elems->wme_len = elen;
- break;
- case WME_OUI_SUBTYPE_TSPEC_ELEMENT:
- elems->wme_tspec = pos;
- elems->wme_tspec_len = elen;
- break;
- default:
- return -1;
- }
- break;
- case 4:
- /* Wi-Fi Protected Setup (WPS) IE */
- elems->wps_ie = pos;
- elems->wps_ie_len = elen;
- break;
- default:
- return -1;
- }
- break;
-
- case OUI_BROADCOM:
- switch (pos[3]) {
- case VENDOR_HT_CAPAB_OUI_TYPE:
- elems->vendor_ht_cap = pos;
- elems->vendor_ht_cap_len = elen;
- break;
- default:
- return -1;
- }
- break;
- default:
- return -1;
- }
- return 0;
-}
-
-/**
- * ieee802_11_parse_elems - Parse information elements in management frames
- * @start: Pointer to the start of IEs
- * @len: Length of IE buffer in octets
- * @elems: Data structure for parsed elements
- * @show_errors: Whether to show parsing errors in debug log
- * Returns: Parsing result
- */
-enum parse_res rtw_ieee802_11_parse_elems(u8 *start, uint len,
- struct rtw_ieee802_11_elems *elems,
- int show_errors)
-{
- uint left = len;
- u8 *pos = start;
- int unknown = 0;
-
- memset(elems, 0, sizeof(*elems));
-
- while (left >= 2) {
- u8 id, elen;
-
- id = *pos++;
- elen = *pos++;
- left -= 2;
-
- if (elen > left)
- return ParseFailed;
-
- switch (id) {
- case WLAN_EID_SSID:
- elems->ssid = pos;
- elems->ssid_len = elen;
- break;
- case WLAN_EID_SUPP_RATES:
- elems->supp_rates = pos;
- elems->supp_rates_len = elen;
- break;
- case WLAN_EID_FH_PARAMS:
- elems->fh_params = pos;
- elems->fh_params_len = elen;
- break;
- case WLAN_EID_DS_PARAMS:
- elems->ds_params = pos;
- elems->ds_params_len = elen;
- break;
- case WLAN_EID_CF_PARAMS:
- elems->cf_params = pos;
- elems->cf_params_len = elen;
- break;
- case WLAN_EID_TIM:
- elems->tim = pos;
- elems->tim_len = elen;
- break;
- case WLAN_EID_IBSS_PARAMS:
- elems->ibss_params = pos;
- elems->ibss_params_len = elen;
- break;
- case WLAN_EID_CHALLENGE:
- elems->challenge = pos;
- elems->challenge_len = elen;
- break;
- case WLAN_EID_ERP_INFO:
- elems->erp_info = pos;
- elems->erp_info_len = elen;
- break;
- case WLAN_EID_EXT_SUPP_RATES:
- elems->ext_supp_rates = pos;
- elems->ext_supp_rates_len = elen;
- break;
- case WLAN_EID_VENDOR_SPECIFIC:
- if (rtw_ieee802_11_parse_vendor_specific(pos, elen, elems, show_errors))
- unknown++;
- break;
- case WLAN_EID_RSN:
- elems->rsn_ie = pos;
- elems->rsn_ie_len = elen;
- break;
- case WLAN_EID_PWR_CAPABILITY:
- elems->power_cap = pos;
- elems->power_cap_len = elen;
- break;
- case WLAN_EID_SUPPORTED_CHANNELS:
- elems->supp_channels = pos;
- elems->supp_channels_len = elen;
- break;
- case WLAN_EID_MOBILITY_DOMAIN:
- elems->mdie = pos;
- elems->mdie_len = elen;
- break;
- case WLAN_EID_FAST_BSS_TRANSITION:
- elems->ftie = pos;
- elems->ftie_len = elen;
- break;
- case WLAN_EID_TIMEOUT_INTERVAL:
- elems->timeout_int = pos;
- elems->timeout_int_len = elen;
- break;
- case WLAN_EID_HT_CAP:
- elems->ht_capabilities = pos;
- elems->ht_capabilities_len = elen;
- break;
- case WLAN_EID_HT_OPERATION:
- elems->ht_operation = pos;
- elems->ht_operation_len = elen;
- break;
- default:
- unknown++;
- break;
- }
- left -= elen;
- pos += elen;
- }
- if (left)
- return ParseFailed;
- return unknown ? ParseUnknown : ParseOK;
-}
-
-u8 key_char2num(u8 ch)
-{
- if ((ch >= '0') && (ch <= '9'))
- return ch - '0';
- else if ((ch >= 'a') && (ch <= 'f'))
- return ch - 'a' + 10;
- else if ((ch >= 'A') && (ch <= 'F'))
- return ch - 'A' + 10;
- else
- return 0xff;
-}
-
-u8 str_2char2num(u8 hch, u8 lch)
-{
- return (key_char2num(hch) * 10) + key_char2num(lch);
-}
-
-u8 key_2char2num(u8 hch, u8 lch)
-{
- return (key_char2num(hch) << 4) | key_char2num(lch);
-}
-
-void rtw_macaddr_cfg(u8 *mac_addr)
-{
- u8 mac[ETH_ALEN];
-
- if (!mac_addr)
- return;
-
- if (rtw_initmac && mac_pton(rtw_initmac, mac)) {
- /* Users specify the mac address */
- ether_addr_copy(mac_addr, mac);
- } else {
- /* Use the mac address stored in the Efuse */
- ether_addr_copy(mac, mac_addr);
- }
-
- if (is_broadcast_ether_addr(mac) || is_zero_ether_addr(mac))
- eth_random_addr(mac_addr);
-}
-
-/**
- * rtw_get_p2p_ie - Search P2P IE from a series of IEs
- * @in_ie: Address of IEs to search
- * @in_len: Length limit from in_ie
- * @p2p_ie: If not NULL and P2P IE is found, P2P IE will be copied to the buf starting from p2p_ie
- * @p2p_ielen: If not NULL and P2P IE is found, will set to the length of the entire P2P IE
- *
- * Returns: The address of the P2P IE found, or NULL
- */
-u8 *rtw_get_p2p_ie(u8 *in_ie, int in_len, u8 *p2p_ie, uint *p2p_ielen)
-{
- uint cnt = 0;
- u8 *p2p_ie_ptr;
- u8 eid, p2p_oui[4] = {0x50, 0x6F, 0x9A, 0x09};
-
- if (p2p_ielen)
- *p2p_ielen = 0;
-
- while (cnt < in_len) {
- eid = in_ie[cnt];
- if ((in_len < 0) || (cnt > MAX_IE_SZ)) {
- dump_stack();
- return NULL;
- }
- if ((eid == _VENDOR_SPECIFIC_IE_) && !memcmp(&in_ie[cnt + 2], p2p_oui, 4)) {
- p2p_ie_ptr = in_ie + cnt;
-
- if (p2p_ie)
- memcpy(p2p_ie, &in_ie[cnt], in_ie[cnt + 1] + 2);
- if (p2p_ielen)
- *p2p_ielen = in_ie[cnt + 1] + 2;
- return p2p_ie_ptr;
- }
- cnt += in_ie[cnt + 1] + 2; /* goto next */
- }
- return NULL;
-}
-
-/**
- * rtw_get_p2p_attr - Search a specific P2P attribute from a given P2P IE
- * @p2p_ie: Address of P2P IE to search
- * @p2p_ielen: Length limit from p2p_ie
- * @target_attr_id: The attribute ID of P2P attribute to search
- * @buf_attr: If not NULL and the P2P attribute is found, P2P attribute will be copied to the buf starting from buf_attr
- * @len_attr: If not NULL and the P2P attribute is found, will set to the length of the entire P2P attribute
- *
- * Returns: the address of the specific P2P attribute found, or NULL
- */
-u8 *rtw_get_p2p_attr(u8 *p2p_ie, uint p2p_ielen, u8 target_attr_id, u8 *buf_attr, u32 *len_attr)
-{
- u8 *attr_ptr = NULL;
- u8 *target_attr_ptr = NULL;
- u8 p2p_oui[4] = {0x50, 0x6F, 0x9A, 0x09};
-
- if (len_attr)
- *len_attr = 0;
-
- if (!p2p_ie || (p2p_ie[0] != _VENDOR_SPECIFIC_IE_) ||
- memcmp(p2p_ie + 2, p2p_oui, 4))
- return attr_ptr;
-
- /* 6 = 1(Element ID) + 1(Length) + 3 (OUI) + 1(OUI Type) */
- attr_ptr = p2p_ie + 6; /* goto first attr */
-
- while (attr_ptr - p2p_ie < p2p_ielen) {
- /* 3 = 1(Attribute ID) + 2(Length) */
- u8 attr_id = *attr_ptr;
- u16 attr_data_len = get_unaligned_le16(attr_ptr + 1);
- u16 attr_len = attr_data_len + 3;
-
- if (attr_id == target_attr_id) {
- target_attr_ptr = attr_ptr;
-
- if (buf_attr)
- memcpy(buf_attr, attr_ptr, attr_len);
- if (len_attr)
- *len_attr = attr_len;
- break;
- }
- attr_ptr += attr_len; /* goto next */
- }
- return target_attr_ptr;
-}
-
-/**
- * rtw_get_p2p_attr_content - Search a specific P2P attribute content from a given P2P IE
- * @p2p_ie: Address of P2P IE to search
- * @p2p_ielen: Length limit from p2p_ie
- * @target_attr_id: The attribute ID of P2P attribute to search
- * @buf_content: If not NULL and the P2P attribute is found, P2P attribute content will be copied to the buf starting from buf_content
- * @len_content: If not NULL and the P2P attribute is found, will set to the length of the P2P attribute content
- *
- * Returns: the address of the specific P2P attribute content found, or NULL
- */
-u8 *rtw_get_p2p_attr_content(u8 *p2p_ie, uint p2p_ielen, u8 target_attr_id, u8 *buf_content, uint *len_content)
-{
- u8 *attr_ptr;
- u32 attr_len;
-
- if (len_content)
- *len_content = 0;
-
- attr_ptr = rtw_get_p2p_attr(p2p_ie, p2p_ielen, target_attr_id, NULL, &attr_len);
-
- if (attr_ptr && attr_len) {
- if (buf_content)
- memcpy(buf_content, attr_ptr + 3, attr_len - 3);
-
- if (len_content)
- *len_content = attr_len - 3;
-
- return attr_ptr + 3;
- }
-
- return NULL;
-}
-
-u32 rtw_set_p2p_attr_content(u8 *pbuf, u8 attr_id, u16 attr_len, u8 *pdata_attr)
-{
- u32 a_len;
-
- *pbuf = attr_id;
-
- /* u16*)(pbuf + 1) = cpu_to_le16(attr_len); */
- RTW_PUT_LE16(pbuf + 1, attr_len);
-
- if (pdata_attr)
- memcpy(pbuf + 3, pdata_attr, attr_len);
-
- a_len = attr_len + 3;
-
- return a_len;
-}
-
-static uint rtw_p2p_attr_remove(u8 *ie, uint ielen_ori, u8 attr_id)
-{
- u8 *target_attr;
- u32 target_attr_len;
- uint ielen = ielen_ori;
-
- while (1) {
- target_attr = rtw_get_p2p_attr(ie, ielen, attr_id, NULL, &target_attr_len);
- if (target_attr && target_attr_len) {
- u8 *next_attr = target_attr + target_attr_len;
- uint remain_len = ielen - (next_attr - ie);
-
- memset(target_attr, 0, target_attr_len);
- memcpy(target_attr, next_attr, remain_len);
- memset(target_attr + remain_len, 0, target_attr_len);
- *(ie + 1) -= target_attr_len;
- ielen -= target_attr_len;
- } else {
- break;
- }
- }
- return ielen;
-}
-
-void rtw_wlan_bssid_ex_remove_p2p_attr(struct wlan_bssid_ex *bss_ex, u8 attr_id)
-{
- u8 *p2p_ie;
- uint p2p_ielen, p2p_ielen_ori;
-
- p2p_ie = rtw_get_p2p_ie(bss_ex->IEs + _FIXED_IE_LENGTH_, bss_ex->IELength - _FIXED_IE_LENGTH_, NULL, &p2p_ielen_ori);
- if (p2p_ie) {
- p2p_ielen = rtw_p2p_attr_remove(p2p_ie, p2p_ielen_ori, attr_id);
- if (p2p_ielen != p2p_ielen_ori) {
- u8 *next_ie_ori = p2p_ie + p2p_ielen_ori;
- u8 *next_ie = p2p_ie + p2p_ielen;
- uint remain_len = bss_ex->IELength - (next_ie_ori - bss_ex->IEs);
-
- memcpy(next_ie, next_ie_ori, remain_len);
- memset(next_ie + remain_len, 0, p2p_ielen_ori - p2p_ielen);
- bss_ex->IELength -= p2p_ielen_ori - p2p_ielen;
- }
- }
-}
-
-static int rtw_get_cipher_info(struct wlan_network *pnetwork)
-{
- u32 wpa_ielen;
- unsigned char *pbuf;
- int group_cipher = 0, pairwise_cipher = 0, is8021x = 0;
- int ret = _FAIL;
-
- pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength - 12);
-
- if (pbuf && (wpa_ielen > 0)) {
- if (rtw_parse_wpa_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x) == _SUCCESS) {
- pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
- pnetwork->BcnInfo.group_cipher = group_cipher;
- pnetwork->BcnInfo.is_8021x = is8021x;
- ret = _SUCCESS;
- }
- } else {
- pbuf = rtw_get_wpa2_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength - 12);
-
- if (pbuf && (wpa_ielen > 0)) {
- if (rtw_parse_wpa2_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x) == _SUCCESS) {
- pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
- pnetwork->BcnInfo.group_cipher = group_cipher;
- pnetwork->BcnInfo.is_8021x = is8021x;
- ret = _SUCCESS;
- }
- }
- }
-
- return ret;
-}
-
-void rtw_get_bcn_info(struct wlan_network *pnetwork)
-{
- unsigned short cap = 0;
- u8 bencrypt = 0;
- __le16 le_tmp;
- u16 wpa_len = 0, rsn_len = 0;
- struct HT_info_element *pht_info = NULL;
- struct ieee80211_ht_cap *pht_cap = NULL;
- unsigned int len;
- unsigned char *p;
-
- memcpy(&le_tmp, rtw_get_capability_from_ie(pnetwork->network.IEs), 2);
- cap = le16_to_cpu(le_tmp);
- if (cap & WLAN_CAPABILITY_PRIVACY) {
- bencrypt = 1;
- pnetwork->network.Privacy = 1;
- } else {
- pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_OPENSYS;
- }
- rtw_get_sec_ie(pnetwork->network.IEs, pnetwork->network.IELength, NULL, &rsn_len, NULL, &wpa_len);
-
- if (rsn_len > 0) {
- pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WPA2;
- } else if (wpa_len > 0) {
- pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WPA;
- } else {
- if (bencrypt)
- pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WEP;
- }
- rtw_get_cipher_info(pnetwork);
-
- /* get bwmode and ch_offset */
- /* parsing HT_CAP_IE */
- p = rtw_get_ie(pnetwork->network.IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pnetwork->network.IELength - _FIXED_IE_LENGTH_);
- if (p && len > 0) {
- pht_cap = (struct ieee80211_ht_cap *)(p + 2);
- pnetwork->BcnInfo.ht_cap_info = le16_to_cpu(pht_cap->cap_info);
- } else {
- pnetwork->BcnInfo.ht_cap_info = 0;
- }
- /* parsing HT_INFO_IE */
- p = rtw_get_ie(pnetwork->network.IEs + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, pnetwork->network.IELength - _FIXED_IE_LENGTH_);
- if (p && len > 0) {
- pht_info = (struct HT_info_element *)(p + 2);
- pnetwork->BcnInfo.ht_info_infos_0 = pht_info->infos[0];
- } else {
- pnetwork->BcnInfo.ht_info_infos_0 = 0;
- }
-}
-
-/* show MCS rate, unit: 100Kbps */
-u16 rtw_mcs_rate(u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40, unsigned char *MCS_rate)
-{
- u16 max_rate = 0;
-
- if (MCS_rate[0] & BIT(7))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 1500 : 1350) : ((short_GI_20) ? 722 : 650);
- else if (MCS_rate[0] & BIT(6))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 1350 : 1215) : ((short_GI_20) ? 650 : 585);
- else if (MCS_rate[0] & BIT(5))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 1200 : 1080) : ((short_GI_20) ? 578 : 520);
- else if (MCS_rate[0] & BIT(4))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 900 : 810) : ((short_GI_20) ? 433 : 390);
- else if (MCS_rate[0] & BIT(3))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 600 : 540) : ((short_GI_20) ? 289 : 260);
- else if (MCS_rate[0] & BIT(2))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 450 : 405) : ((short_GI_20) ? 217 : 195);
- else if (MCS_rate[0] & BIT(1))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 300 : 270) : ((short_GI_20) ? 144 : 130);
- else if (MCS_rate[0] & BIT(0))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 150 : 135) : ((short_GI_20) ? 72 : 65);
-
- return max_rate;
-}
diff --git a/drivers/staging/r8188eu/core/rtw_ioctl_set.c b/drivers/staging/r8188eu/core/rtw_ioctl_set.c
deleted file mode 100644
index 785c0dba508f..000000000000
--- a/drivers/staging/r8188eu/core/rtw_ioctl_set.c
+++ /dev/null
@@ -1,479 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#define _RTW_IOCTL_SET_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/rtw_ioctl_set.h"
-#include "../include/hal_intf.h"
-
-#include "../include/usb_osintf.h"
-#include "../include/usb_ops.h"
-
-u8 rtw_do_join(struct adapter *padapter)
-{
- struct list_head *plist, *phead;
- u8 *pibss = NULL;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct __queue *queue = &pmlmepriv->scanned_queue;
- u8 ret = _SUCCESS;
-
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
- phead = get_list_head(queue);
- plist = phead->next;
-
- pmlmepriv->cur_network.join_res = -2;
-
- set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
-
- pmlmepriv->pscanned = plist;
-
- pmlmepriv->to_join = true;
-
- if (list_empty(&queue->queue)) {
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
-
-		/* when set_ssid/set_bssid is called for rtw_do_join() but the scanned queue is empty, */
-		/* try to issue a sitesurvey first */
-
- if (!pmlmepriv->LinkDetectInfo.bBusyTraffic ||
- pmlmepriv->to_roaming > 0) {
- /* submit site_survey_cmd */
- ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1);
- if (ret != _SUCCESS)
- pmlmepriv->to_join = false;
- } else {
- pmlmepriv->to_join = false;
- ret = _FAIL;
- }
-
- return ret;
- } else {
- int select_ret;
-
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- select_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv);
- if (select_ret == _SUCCESS) {
- pmlmepriv->to_join = false;
- _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT);
- } else {
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
-					/* submit createbss_cmd to change to an ADHOC_MASTER */
-
- /* pmlmepriv->lock has been acquired by caller... */
- struct wlan_bssid_ex *pdev_network = &padapter->registrypriv.dev_network;
-
- pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE;
-
- pibss = padapter->registrypriv.dev_network.MacAddress;
-
- memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
-
- rtw_update_registrypriv_dev_network(padapter);
-
- rtw_generate_random_ibss(pibss);
-
- if (rtw_createbss_cmd(padapter) != _SUCCESS)
- return false;
-
- pmlmepriv->to_join = false;
- } else {
- /* can't associate ; reset under-linking */
- _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
-
-				/* when set_ssid/set_bssid is called for rtw_do_join() but there is no desired bss in the scanned queue, */
-				/* try to issue a sitesurvey first */
- if (!pmlmepriv->LinkDetectInfo.bBusyTraffic ||
- pmlmepriv->to_roaming > 0) {
- ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1);
- if (ret != _SUCCESS)
- pmlmepriv->to_join = false;
- } else {
- ret = _FAIL;
- pmlmepriv->to_join = false;
- }
- }
- }
- }
-
- return ret;
-}
-
-u8 rtw_set_802_11_bssid(struct adapter *padapter, u8 *bssid)
-{
- u8 status = _SUCCESS;
- u32 cur_time = 0;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if ((bssid[0] == 0x00 && bssid[1] == 0x00 && bssid[2] == 0x00 &&
- bssid[3] == 0x00 && bssid[4] == 0x00 && bssid[5] == 0x00) ||
- (bssid[0] == 0xFF && bssid[1] == 0xFF && bssid[2] == 0xFF &&
- bssid[3] == 0xFF && bssid[4] == 0xFF && bssid[5] == 0xFF)) {
- status = _FAIL;
- goto exit;
- }
-
- spin_lock_bh(&pmlmepriv->lock);
-
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
- goto handle_tkip_countermeasure;
- else if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
- goto release_mlme_lock;
-
- if (check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE)) {
- if (!memcmp(&pmlmepriv->cur_network.network.MacAddress, bssid, ETH_ALEN)) {
- if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE))
-				goto release_mlme_lock;/* the driver is in WIFI_ADHOC_MASTER_STATE; no need to create the bss again. */
- } else {
- rtw_disassoc_cmd(padapter, 0, true);
-
- if (check_fwstate(pmlmepriv, _FW_LINKED))
- rtw_indicate_disconnect(padapter);
-
- rtw_free_assoc_resources(padapter, 1);
-
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
- _clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
- set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
- }
- }
- }
-
-handle_tkip_countermeasure:
- /* should we add something here...? */
-
- if (padapter->securitypriv.btkip_countermeasure) {
- cur_time = jiffies;
-
- if ((cur_time - padapter->securitypriv.btkip_countermeasure_time) > 60 * HZ) {
- padapter->securitypriv.btkip_countermeasure = false;
- padapter->securitypriv.btkip_countermeasure_time = 0;
- } else {
- status = _FAIL;
- goto release_mlme_lock;
- }
- }
-
- memcpy(&pmlmepriv->assoc_bssid, bssid, ETH_ALEN);
- pmlmepriv->assoc_by_bssid = true;
-
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
- pmlmepriv->to_join = true;
- else
- status = rtw_do_join(padapter);
-
-release_mlme_lock:
- spin_unlock_bh(&pmlmepriv->lock);
-
-exit:
- return status;
-}
-
-u8 rtw_set_802_11_ssid(struct adapter *padapter, struct ndis_802_11_ssid *ssid)
-{
- u8 status = _SUCCESS;
- u32 cur_time = 0;
-
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_network *pnetwork = &pmlmepriv->cur_network;
-
- if (!padapter->hw_init_completed) {
- status = _FAIL;
- goto exit;
- }
-
- spin_lock_bh(&pmlmepriv->lock);
-
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
- goto handle_tkip_countermeasure;
- } else if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) {
- goto release_mlme_lock;
- }
-
- if (check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE)) {
- if ((pmlmepriv->assoc_ssid.SsidLength == ssid->SsidLength) &&
- (!memcmp(&pmlmepriv->assoc_ssid.Ssid, ssid->Ssid, ssid->SsidLength))) {
- if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- if (!rtw_is_same_ibss(padapter, pnetwork)) {
- /* if in WIFI_ADHOC_MASTER_STATE | WIFI_ADHOC_STATE, create bss or rejoin again */
- rtw_disassoc_cmd(padapter, 0, true);
-
- if (check_fwstate(pmlmepriv, _FW_LINKED))
- rtw_indicate_disconnect(padapter);
-
- rtw_free_assoc_resources(padapter, 1);
-
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
- _clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
- set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
- }
- } else {
-					goto release_mlme_lock;/* the driver is in WIFI_ADHOC_MASTER_STATE; no need to create the bss again. */
- }
- } else {
- rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_JOINBSS, 1);
- }
- } else {
- rtw_disassoc_cmd(padapter, 0, true);
-
- if (check_fwstate(pmlmepriv, _FW_LINKED))
- rtw_indicate_disconnect(padapter);
-
- rtw_free_assoc_resources(padapter, 1);
-
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
- _clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
- set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
- }
- }
- }
-
-handle_tkip_countermeasure:
-
- if (padapter->securitypriv.btkip_countermeasure) {
- cur_time = jiffies;
-
- if ((cur_time - padapter->securitypriv.btkip_countermeasure_time) > 60 * HZ) {
- padapter->securitypriv.btkip_countermeasure = false;
- padapter->securitypriv.btkip_countermeasure_time = 0;
- } else {
- status = _FAIL;
- goto release_mlme_lock;
- }
- }
-
- memcpy(&pmlmepriv->assoc_ssid, ssid, sizeof(struct ndis_802_11_ssid));
- pmlmepriv->assoc_by_bssid = false;
-
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
- pmlmepriv->to_join = true;
- } else {
- status = rtw_do_join(padapter);
- }
-
-release_mlme_lock:
- spin_unlock_bh(&pmlmepriv->lock);
-
-exit:
- return status;
-}
-
-u8 rtw_set_802_11_infrastructure_mode(struct adapter *padapter,
- enum ndis_802_11_network_infra networktype)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_network *cur_network = &pmlmepriv->cur_network;
- enum ndis_802_11_network_infra *pold_state = &cur_network->network.InfrastructureMode;
-
- if (*pold_state != networktype) {
- spin_lock_bh(&pmlmepriv->lock);
-
- if (*pold_state == Ndis802_11APMode) {
- /* change to other mode from Ndis802_11APMode */
- cur_network->join_res = -1;
-
- stop_ap_mode(padapter);
- }
-
- if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
- (*pold_state == Ndis802_11IBSS))
- rtw_disassoc_cmd(padapter, 0, true);
-
- if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
- (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)))
- rtw_free_assoc_resources(padapter, 1);
-
- if ((*pold_state == Ndis802_11Infrastructure) || (*pold_state == Ndis802_11IBSS)) {
- if (check_fwstate(pmlmepriv, _FW_LINKED))
-				rtw_indicate_disconnect(padapter); /* clears the linked state; the caller must already have decided whether to issue a disassoc cmd */
- }
-
- *pold_state = networktype;
-
- _clr_fwstate_(pmlmepriv, ~WIFI_NULL_STATE);
-
- switch (networktype) {
- case Ndis802_11IBSS:
- set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
- break;
- case Ndis802_11Infrastructure:
- set_fwstate(pmlmepriv, WIFI_STATION_STATE);
- break;
- case Ndis802_11APMode:
- set_fwstate(pmlmepriv, WIFI_AP_STATE);
- break;
- case Ndis802_11AutoUnknown:
- case Ndis802_11InfrastructureMax:
- break;
- }
- spin_unlock_bh(&pmlmepriv->lock);
- }
-
- return true;
-}
-
-void rtw_set_802_11_disassociate(struct adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- spin_lock_bh(&pmlmepriv->lock);
-
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- rtw_disassoc_cmd(padapter, 0, true);
- rtw_indicate_disconnect(padapter);
- rtw_free_assoc_resources(padapter, 1);
- rtw_pwr_wakeup(padapter);
- }
-
- spin_unlock_bh(&pmlmepriv->lock);
-}
-
-u8 rtw_set_802_11_bssid_list_scan(struct adapter *padapter, struct ndis_802_11_ssid *pssid, int ssid_max_num)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- u8 res = true;
-
- if (!padapter) {
- res = false;
- goto exit;
- }
- if (!padapter->hw_init_completed) {
- res = false;
- goto exit;
- }
-
- if ((check_fwstate(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING)) ||
- (pmlmepriv->LinkDetectInfo.bBusyTraffic)) {
- /* Scan or linking is in progress, do nothing. */
- res = true;
- } else {
- spin_lock_bh(&pmlmepriv->lock);
-
- res = rtw_sitesurvey_cmd(padapter, pssid, ssid_max_num);
-
- spin_unlock_bh(&pmlmepriv->lock);
- }
-exit:
-
- return res;
-}
-
-u8 rtw_set_802_11_authentication_mode(struct adapter *padapter, enum ndis_802_11_auth_mode authmode)
-{
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- int res;
- u8 ret;
-
- psecuritypriv->ndisauthtype = authmode;
-
- if (psecuritypriv->ndisauthtype > 3)
- psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
-
- res = rtw_set_auth(padapter, psecuritypriv);
-
- if (res == _SUCCESS)
- ret = true;
- else
- ret = false;
-
- return ret;
-}
-
-u8 rtw_set_802_11_add_wep(struct adapter *padapter, struct ndis_802_11_wep *wep)
-{
- int keyid, res;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- u8 ret = _SUCCESS;
-
- keyid = wep->KeyIndex & 0x3fffffff;
-
- if (keyid >= 4) {
- ret = false;
- goto exit;
- }
-
- switch (wep->KeyLength) {
- case 5:
- psecuritypriv->dot11PrivacyAlgrthm = _WEP40_;
- break;
- case 13:
- psecuritypriv->dot11PrivacyAlgrthm = _WEP104_;
- break;
- default:
- psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
- break;
- }
-
- memcpy(&psecuritypriv->dot11DefKey[keyid].skey[0], &wep->KeyMaterial, wep->KeyLength);
-
- psecuritypriv->dot11DefKeylen[keyid] = wep->KeyLength;
-
- psecuritypriv->dot11PrivacyKeyIndex = keyid;
-
- res = rtw_set_key(padapter, psecuritypriv, keyid, 1);
-
- if (res == _FAIL)
- ret = false;
-exit:
-
- return ret;
-}
-
-/*
-* rtw_get_cur_max_rate - get the current maximum supported rate
-* @adapter: pointer to struct adapter structure
-*
-* Return: the current maximum rate in units of 100 kbps, or 0 if not linked
-*/
-u16 rtw_get_cur_max_rate(struct adapter *adapter)
-{
- int i = 0;
- u8 *p;
- u16 rate = 0, max_rate = 0;
- struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct registry_priv *pregistrypriv = &adapter->registrypriv;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
- struct ieee80211_ht_cap *pht_capie;
- u8 bw_40MHz = 0, short_GI_20 = 0, short_GI_40 = 0;
- u16 mcs_rate = 0;
- u32 ht_ielen = 0;
-
- if ((!check_fwstate(pmlmepriv, _FW_LINKED)) &&
- (!check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)))
- return 0;
-
- if (pmlmeext->cur_wireless_mode & (WIRELESS_11_24N)) {
- p = rtw_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pcur_bss->IELength - 12);
- if (p && ht_ielen > 0) {
- pht_capie = (struct ieee80211_ht_cap *)(p + 2);
-
- memcpy(&mcs_rate, pht_capie->mcs.rx_mask, 2);
-
-			/* cur_bwmode is updated by beacon, pmlmeinfo is updated by association response */
- bw_40MHz = (pmlmeext->cur_bwmode && (HT_INFO_HT_PARAM_REC_TRANS_CHNL_WIDTH & pmlmeinfo->HT_info.infos[0])) ? 1 : 0;
-
- short_GI_20 = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & IEEE80211_HT_CAP_SGI_20) ? 1 : 0;
- short_GI_40 = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & IEEE80211_HT_CAP_SGI_40) ? 1 : 0;
-
- max_rate = rtw_mcs_rate(bw_40MHz & (pregistrypriv->cbw40_enable),
- short_GI_20,
- short_GI_40,
- pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate);
- }
- } else {
- while ((pcur_bss->SupportedRates[i] != 0) && (pcur_bss->SupportedRates[i] != 0xFF)) {
- rate = pcur_bss->SupportedRates[i] & 0x7F;
- if (rate > max_rate)
- max_rate = rate;
- i++;
- }
-
- max_rate *= 5;
- }
-
- return max_rate;
-}
diff --git a/drivers/staging/r8188eu/core/rtw_iol.c b/drivers/staging/r8188eu/core/rtw_iol.c
deleted file mode 100644
index 31e196ccd899..000000000000
--- a/drivers/staging/r8188eu/core/rtw_iol.c
+++ /dev/null
@@ -1,160 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#include "../include/rtw_iol.h"
-
-struct xmit_frame *rtw_IOL_accquire_xmit_frame(struct adapter *adapter)
-{
- struct xmit_frame *xmit_frame;
- struct xmit_buf *xmitbuf;
- struct pkt_attrib *pattrib;
- struct xmit_priv *pxmitpriv = &adapter->xmitpriv;
-
- xmit_frame = rtw_alloc_xmitframe(pxmitpriv);
- if (!xmit_frame)
- return NULL;
-
- xmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
- if (!xmitbuf) {
- rtw_free_xmitframe(pxmitpriv, xmit_frame);
- return NULL;
- }
-
- xmit_frame->frame_tag = MGNT_FRAMETAG;
- xmit_frame->pxmitbuf = xmitbuf;
- xmit_frame->buf_addr = xmitbuf->pbuf;
- xmitbuf->priv_data = xmit_frame;
-
- pattrib = &xmit_frame->attrib;
- update_mgntframe_attrib(adapter, pattrib);
- pattrib->qsel = 0x10;/* Beacon */
- pattrib->subtype = WIFI_BEACON;
- pattrib->pktlen = 0;
- pattrib->last_txcmdsz = 0;
-
- return xmit_frame;
-}
-
-int rtw_IOL_append_cmds(struct xmit_frame *xmit_frame, u8 *IOL_cmds, u32 cmd_len)
-{
- struct pkt_attrib *pattrib = &xmit_frame->attrib;
- u16 buf_offset;
- u32 ori_len;
-
- buf_offset = TXDESC_OFFSET;
- ori_len = buf_offset + pattrib->pktlen;
-
- /* check if the io_buf can accommodate new cmds */
- if (ori_len + cmd_len + 8 > MAX_XMITBUF_SZ)
- return _FAIL;
-
- memcpy(xmit_frame->buf_addr + buf_offset + pattrib->pktlen, IOL_cmds, cmd_len);
- pattrib->pktlen += cmd_len;
- pattrib->last_txcmdsz += cmd_len;
-
- return _SUCCESS;
-}
-
-bool rtw_IOL_applied(struct adapter *adapter)
-{
- if (adapter->registrypriv.fw_iol == 1)
- return true;
-
- if ((adapter->registrypriv.fw_iol == 2) &&
- (adapter_to_dvobj(adapter)->pusbdev->speed != USB_SPEED_HIGH))
- return true;
-
- return false;
-}
-
-int rtw_IOL_append_WB_cmd(struct xmit_frame *xmit_frame, u16 addr, u8 value, u8 mask)
-{
- struct ioreg_cfg cmd = {8, IOREG_CMD_WB_REG, 0x0, 0x0, 0x0};
-
- cmd.address = cpu_to_le16(addr);
- cmd.data = cpu_to_le32(value);
-
- if (mask != 0xFF) {
- cmd.length = 12;
- cmd.mask = cpu_to_le32(mask);
- }
- return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, cmd.length);
-}
-
-int rtw_IOL_append_WW_cmd(struct xmit_frame *xmit_frame, u16 addr, u16 value, u16 mask)
-{
- struct ioreg_cfg cmd = {8, IOREG_CMD_WW_REG, 0x0, 0x0, 0x0};
-
- cmd.address = cpu_to_le16(addr);
- cmd.data = cpu_to_le32(value);
-
- if (mask != 0xFFFF) {
- cmd.length = 12;
- cmd.mask = cpu_to_le32(mask);
- }
- return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, cmd.length);
-}
-
-int rtw_IOL_append_WD_cmd(struct xmit_frame *xmit_frame, u16 addr, u32 value, u32 mask)
-{
- struct ioreg_cfg cmd = {8, IOREG_CMD_WD_REG, 0x0, 0x0, 0x0};
-
- cmd.address = cpu_to_le16(addr);
- cmd.data = cpu_to_le32(value);
-
- if (mask != 0xFFFFFFFF) {
- cmd.length = 12;
- cmd.mask = cpu_to_le32(mask);
- }
- return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, cmd.length);
-}
-
-int rtw_IOL_append_WRF_cmd(struct xmit_frame *xmit_frame, u8 rf_path, u16 addr, u32 value, u32 mask)
-{
- struct ioreg_cfg cmd = {8, IOREG_CMD_W_RF, 0x0, 0x0, 0x0};
-
- cmd.address = cpu_to_le16((rf_path << 8) | ((addr) & 0xFF));
- cmd.data = cpu_to_le32(value);
-
- if (mask != 0x000FFFFF) {
- cmd.length = 12;
- cmd.mask = cpu_to_le32(mask);
- }
- return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, cmd.length);
-}
-
-int rtw_IOL_append_DELAY_US_cmd(struct xmit_frame *xmit_frame, u16 us)
-{
- struct ioreg_cfg cmd = {4, IOREG_CMD_DELAY_US, 0x0, 0x0, 0x0};
- cmd.address = cpu_to_le16(us);
-
- return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, 4);
-}
-
-int rtw_IOL_append_DELAY_MS_cmd(struct xmit_frame *xmit_frame, u16 ms)
-{
- struct ioreg_cfg cmd = {4, IOREG_CMD_DELAY_US, 0x0, 0x0, 0x0};
-
- cmd.address = cpu_to_le16(ms);
- return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, 4);
-}
-
-int rtw_IOL_append_END_cmd(struct xmit_frame *xmit_frame)
-{
- struct ioreg_cfg cmd = {4, IOREG_CMD_END, cpu_to_le16(0xFFFF), cpu_to_le32(0xFF), 0x0};
-
- return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, 4);
-}
-
-u8 rtw_IOL_cmd_boundary_handle(struct xmit_frame *pxmit_frame)
-{
- u8 is_cmd_bndy = false;
- if (((pxmit_frame->attrib.pktlen + 32) % 256) + 8 >= 256) {
- rtw_IOL_append_END_cmd(pxmit_frame);
- pxmit_frame->attrib.pktlen = ((((pxmit_frame->attrib.pktlen + 32) / 256) + 1) * 256);
-
- pxmit_frame->attrib.last_txcmdsz = pxmit_frame->attrib.pktlen;
- is_cmd_bndy = true;
- }
- return is_cmd_bndy;
-}
diff --git a/drivers/staging/r8188eu/core/rtw_led.c b/drivers/staging/r8188eu/core/rtw_led.c
deleted file mode 100644
index 48725ce9d369..000000000000
--- a/drivers/staging/r8188eu/core/rtw_led.c
+++ /dev/null
@@ -1,255 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#include "../include/drv_types.h"
-#include "../include/rtw_led.h"
-#include "../include/rtl8188e_spec.h"
-
-#define LED_BLINK_NO_LINK_INTVL msecs_to_jiffies(1000)
-#define LED_BLINK_LINK_INTVL msecs_to_jiffies(500)
-#define LED_BLINK_SCAN_INTVL msecs_to_jiffies(180)
-#define LED_BLINK_FASTER_INTVL msecs_to_jiffies(50)
-#define LED_BLINK_WPS_SUCESS_INTVL msecs_to_jiffies(5000)
-
-#define IS_LED_WPS_BLINKING(l) \
- ((l)->CurrLedState == LED_BLINK_WPS || \
- (l)->CurrLedState == LED_BLINK_WPS_STOP || \
- (l)->bLedWPSBlinkInProgress)
-
-static void ResetLedStatus(struct led_priv *pLed)
-{
- pLed->CurrLedState = RTW_LED_OFF; /* Current LED state. */
- pLed->bLedOn = false; /* true if LED is ON, false if LED is OFF. */
-
-	pLed->bLedBlinkInProgress = false; /* true if it is blinking, false otherwise. */
- pLed->bLedWPSBlinkInProgress = false;
-
- pLed->BlinkTimes = 0; /* Number of times to toggle led state for blinking. */
-
- pLed->bLedScanBlinkInProgress = false;
-}
-
-static void SwLedOn(struct led_priv *pLed)
-{
- struct adapter *padapter = container_of(pLed, struct adapter, ledpriv);
-
- if (padapter->bDriverStopped)
- return;
-
- if (rtw_write8(padapter, REG_LEDCFG2, BIT(5)) != _SUCCESS)
- return;
-
- pLed->bLedOn = true;
-}
-
-static void SwLedOff(struct led_priv *pLed)
-{
- struct adapter *padapter = container_of(pLed, struct adapter, ledpriv);
-
- if (padapter->bDriverStopped)
- return;
-
- if (rtw_write8(padapter, REG_LEDCFG2, BIT(5) | BIT(3)) != _SUCCESS)
- return;
-
- pLed->bLedOn = false;
-}
-
-static void blink_work(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct led_priv *pLed = container_of(dwork, struct led_priv, blink_work);
- struct adapter *padapter = container_of(pLed, struct adapter, ledpriv);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff(pLed);
- ResetLedStatus(pLed);
- return;
- }
-
- if (pLed->bLedOn)
- SwLedOff(pLed);
- else
- SwLedOn(pLed);
-
- switch (pLed->CurrLedState) {
- case LED_BLINK_SLOWLY:
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
- break;
- case LED_BLINK_NORMAL:
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
- break;
- case LED_BLINK_SCAN:
- case LED_BLINK_TXRX:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0) {
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = LED_BLINK_NORMAL;
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
- } else {
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
- }
- pLed->bLedBlinkInProgress = false;
- pLed->bLedScanBlinkInProgress = false;
- } else {
- schedule_delayed_work(&pLed->blink_work,
- pLed->CurrLedState == LED_BLINK_SCAN ?
- LED_BLINK_SCAN_INTVL : LED_BLINK_FASTER_INTVL);
- }
- break;
- case LED_BLINK_WPS:
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
- break;
- case LED_BLINK_WPS_STOP: /* WPS success */
- if (!pLed->bLedOn) {
- pLed->CurrLedState = LED_BLINK_NORMAL;
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
-
- pLed->bLedWPSBlinkInProgress = false;
- } else {
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_WPS_SUCESS_INTVL);
- }
- break;
- default:
- break;
- }
-}
-
-void rtl8188eu_InitSwLeds(struct adapter *padapter)
-{
- struct led_priv *pledpriv = &padapter->ledpriv;
-
- ResetLedStatus(pledpriv);
- INIT_DELAYED_WORK(&pledpriv->blink_work, blink_work);
-}
-
-void rtl8188eu_DeInitSwLeds(struct adapter *padapter)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
-
- cancel_delayed_work_sync(&ledpriv->blink_work);
- ResetLedStatus(ledpriv);
- SwLedOff(ledpriv);
-}
-
-void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction)
-{
- struct led_priv *pLed = &padapter->ledpriv;
- struct registry_priv *registry_par;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if (!padapter->hw_init_completed)
- return;
-
- if (!pLed->bRegUseLed)
- return;
-
- registry_par = &padapter->registrypriv;
- if (!registry_par->led_enable)
- return;
-
- switch (LedAction) {
- case LED_CTL_START_TO_LINK:
- case LED_CTL_NO_LINK:
- if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
- return;
-
- cancel_delayed_work(&pLed->blink_work);
-
- pLed->bLedBlinkInProgress = false;
-
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
- break;
- case LED_CTL_LINK:
- if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
- return;
-
- cancel_delayed_work(&pLed->blink_work);
-
- pLed->bLedBlinkInProgress = false;
-
- pLed->CurrLedState = LED_BLINK_NORMAL;
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
- break;
- case LED_CTL_SITE_SURVEY:
- if ((pmlmepriv->LinkDetectInfo.bBusyTraffic) && (check_fwstate(pmlmepriv, _FW_LINKED)))
- return;
-
- if (pLed->bLedScanBlinkInProgress)
- return;
-
- if (IS_LED_WPS_BLINKING(pLed))
- return;
-
- cancel_delayed_work(&pLed->blink_work);
-
- pLed->bLedBlinkInProgress = false;
- pLed->bLedScanBlinkInProgress = true;
-
- pLed->CurrLedState = LED_BLINK_SCAN;
- pLed->BlinkTimes = 24;
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
- break;
- case LED_CTL_TX:
- case LED_CTL_RX:
- if (pLed->bLedBlinkInProgress)
- return;
-
- if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
- return;
-
- cancel_delayed_work(&pLed->blink_work);
-
- pLed->bLedBlinkInProgress = true;
-
- pLed->CurrLedState = LED_BLINK_TXRX;
- pLed->BlinkTimes = 2;
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_FASTER_INTVL);
- break;
- case LED_CTL_START_WPS: /* wait until xinpin finish */
- if (pLed->bLedWPSBlinkInProgress)
- return;
-
- cancel_delayed_work(&pLed->blink_work);
-
- pLed->bLedBlinkInProgress = false;
- pLed->bLedScanBlinkInProgress = false;
- pLed->bLedWPSBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_WPS;
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
- break;
- case LED_CTL_STOP_WPS:
- cancel_delayed_work(&pLed->blink_work);
-
- pLed->bLedBlinkInProgress = false;
- pLed->bLedScanBlinkInProgress = false;
- pLed->bLedWPSBlinkInProgress = true;
-
- pLed->CurrLedState = LED_BLINK_WPS_STOP;
- if (pLed->bLedOn) {
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_WPS_SUCESS_INTVL);
- } else {
- schedule_delayed_work(&pLed->blink_work, 0);
- }
- break;
- case LED_CTL_STOP_WPS_FAIL:
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedWPSBlinkInProgress = false;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
- break;
- case LED_CTL_POWER_OFF:
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->bLedBlinkInProgress = false;
- pLed->bLedWPSBlinkInProgress = false;
- pLed->bLedScanBlinkInProgress = false;
- cancel_delayed_work(&pLed->blink_work);
- SwLedOff(pLed);
- break;
- default:
- break;
- }
-}
diff --git a/drivers/staging/r8188eu/core/rtw_mlme.c b/drivers/staging/r8188eu/core/rtw_mlme.c
deleted file mode 100644
index fb7d0e161fdd..000000000000
--- a/drivers/staging/r8188eu/core/rtw_mlme.c
+++ /dev/null
@@ -1,2067 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _RTW_MLME_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/hal_intf.h"
-#include "../include/sta_info.h"
-#include "../include/wifi.h"
-#include "../include/wlan_bssdef.h"
-#include "../include/rtw_ioctl_set.h"
-#include "../include/usb_osintf.h"
-#include "../include/rtl8188e_dm.h"
-
-extern unsigned char MCS_rate_1R[16];
-
-void rtw_set_roaming(struct adapter *adapter, u8 to_roaming)
-{
- if (to_roaming == 0)
- adapter->mlmepriv.to_join = false;
- adapter->mlmepriv.to_roaming = to_roaming;
-}
-
-u8 rtw_to_roaming(struct adapter *adapter)
-{
- return adapter->mlmepriv.to_roaming;
-}
-
-static void rtw_free_mlme_ie_data(u8 **ppie, u32 *plen)
-{
- kfree(*ppie);
- *plen = 0;
- *ppie = NULL;
-}
-
-void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
-{
- kfree(pmlmepriv->assoc_req);
- rtw_free_mlme_ie_data(&pmlmepriv->wps_beacon_ie, &pmlmepriv->wps_beacon_ie_len);
- rtw_free_mlme_ie_data(&pmlmepriv->wps_probe_req_ie, &pmlmepriv->wps_probe_req_ie_len);
- rtw_free_mlme_ie_data(&pmlmepriv->wps_probe_resp_ie, &pmlmepriv->wps_probe_resp_ie_len);
- rtw_free_mlme_ie_data(&pmlmepriv->wps_assoc_resp_ie, &pmlmepriv->wps_assoc_resp_ie_len);
-
- rtw_free_mlme_ie_data(&pmlmepriv->p2p_beacon_ie, &pmlmepriv->p2p_beacon_ie_len);
- rtw_free_mlme_ie_data(&pmlmepriv->p2p_probe_req_ie, &pmlmepriv->p2p_probe_req_ie_len);
- rtw_free_mlme_ie_data(&pmlmepriv->p2p_probe_resp_ie, &pmlmepriv->p2p_probe_resp_ie_len);
- rtw_free_mlme_ie_data(&pmlmepriv->p2p_go_probe_resp_ie, &pmlmepriv->p2p_go_probe_resp_ie_len);
- rtw_free_mlme_ie_data(&pmlmepriv->p2p_assoc_req_ie, &pmlmepriv->p2p_assoc_req_ie_len);
-}
-
-void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork, u8 isfreeall)
-{
- u32 curr_time, delta_time;
- u32 lifetime = SCANQUEUE_LIFETIME;
- struct __queue *free_queue = &pmlmepriv->free_bss_pool;
-
- if (!pnetwork)
- return;
-
- if (pnetwork->fixed)
- return;
- curr_time = jiffies;
- if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) ||
- (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)))
- lifetime = 1;
- if (!isfreeall) {
- delta_time = (curr_time - pnetwork->last_scanned) / HZ;
- if (delta_time < lifetime)/* unit:sec */
- return;
- }
- spin_lock_bh(&free_queue->lock);
- list_del_init(&pnetwork->list);
- list_add_tail(&pnetwork->list, &free_queue->queue);
- pmlmepriv->num_of_scanned--;
- spin_unlock_bh(&free_queue->lock);
-}
-
-/*
- Return the wlan_network with the matching addr.
-
- Must be called in atomic context to avoid a possible race condition.
-*/
-struct wlan_network *_rtw_find_network(struct __queue *scanned_queue, u8 *addr)
-{
- struct list_head *phead, *plist;
- struct wlan_network *pnetwork = NULL;
- u8 zero_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
-
- if (!memcmp(zero_addr, addr, ETH_ALEN)) {
- pnetwork = NULL;
- goto exit;
- }
- phead = get_list_head(scanned_queue);
- plist = phead->next;
-
- while (plist != phead) {
- pnetwork = container_of(plist, struct wlan_network, list);
- if (!memcmp(addr, pnetwork->network.MacAddress, ETH_ALEN))
- break;
- plist = plist->next;
- }
- if (plist == phead)
- pnetwork = NULL;
-exit:
-
- return pnetwork;
-}
-
-void _rtw_free_network_queue(struct adapter *padapter, u8 isfreeall)
-{
- struct list_head *phead, *plist;
- struct wlan_network *pnetwork;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct __queue *scanned_queue = &pmlmepriv->scanned_queue;
-
- spin_lock_bh(&scanned_queue->lock);
-
- phead = get_list_head(scanned_queue);
- plist = phead->next;
-
- while (phead != plist) {
- pnetwork = container_of(plist, struct wlan_network, list);
-
- plist = plist->next;
-
- _rtw_free_network(pmlmepriv, pnetwork, isfreeall);
- }
- spin_unlock_bh(&scanned_queue->lock);
-
-}
-
-int rtw_if_up(struct adapter *padapter)
-{
- int res;
-
- if (padapter->bDriverStopped || padapter->bSurpriseRemoved ||
- !check_fwstate(&padapter->mlmepriv, _FW_LINKED))
- res = false;
- else
- res = true;
-
- return res;
-}
-
-void rtw_generate_random_ibss(u8 *pibss)
-{
- u32 curtime = jiffies;
-
- pibss[0] = 0x02; /* in ad-hoc mode, bit 1 must be set to 1 */
- pibss[1] = 0x11;
- pibss[2] = 0x87;
- pibss[3] = (u8)(curtime & 0xff);/* p[0]; */
- pibss[4] = (u8)((curtime >> 8) & 0xff);/* p[1]; */
- pibss[5] = (u8)((curtime >> 16) & 0xff);/* p[2]; */
-}
-
-u8 *rtw_get_capability_from_ie(u8 *ie)
-{
- return ie + 8 + 2;
-}
-
-u16 rtw_get_capability(struct wlan_bssid_ex *bss)
-{
- __le16 val;
-
- memcpy((u8 *)&val, rtw_get_capability_from_ie(bss->IEs), 2);
-
- return le16_to_cpu(val);
-}
-
-u8 *rtw_get_beacon_interval_from_ie(u8 *ie)
-{
- return ie + 8;
-}
-
-static void rtw_join_timeout_handler(struct timer_list *t)
-{
- struct adapter *adapter = from_timer(adapter, t, mlmepriv.assoc_timer);
-
- _rtw_join_timeout_handler(adapter);
-}
-
-static void _rtw_scan_timeout_handler(struct timer_list *t)
-{
- struct adapter *adapter = from_timer(adapter, t, mlmepriv.scan_to_timer);
-
- rtw_scan_timeout_handler(adapter);
-}
-
-static void _dynamic_check_timer_handlder(struct timer_list *t)
-{
- struct adapter *adapter = from_timer(adapter, t, mlmepriv.dynamic_chk_timer);
-
- rtw_dynamic_check_timer_handlder(adapter);
- _set_timer(&adapter->mlmepriv.dynamic_chk_timer, 2000);
-}
-
-static void rtw_init_mlme_timer(struct adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- timer_setup(&pmlmepriv->assoc_timer, rtw_join_timeout_handler, 0);
- timer_setup(&pmlmepriv->scan_to_timer, _rtw_scan_timeout_handler, 0);
- timer_setup(&pmlmepriv->dynamic_chk_timer, _dynamic_check_timer_handlder, 0);
-}
-
-int rtw_init_mlme_priv(struct adapter *padapter)/* struct mlme_priv *pmlmepriv) */
-{
- int i;
- u8 *pbuf;
- struct wlan_network *pnetwork;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
-
- pmlmepriv->nic_hdl = (u8 *)padapter;
-
- pmlmepriv->pscanned = NULL;
- pmlmepriv->fw_state = 0;
- pmlmepriv->cur_network.network.InfrastructureMode = Ndis802_11AutoUnknown;
- pmlmepriv->scan_mode = SCAN_ACTIVE;/* 1: active, 0: passive. Maybe someday we should rename this variable to "active_mode" (Jeff) */
-
- spin_lock_init(&pmlmepriv->lock);
- rtw_init_queue(&pmlmepriv->free_bss_pool);
- rtw_init_queue(&pmlmepriv->scanned_queue);
-
- set_scanned_network_val(pmlmepriv, 0);
-
- memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct ndis_802_11_ssid));
-
- pbuf = vzalloc(MAX_BSS_CNT * (sizeof(struct wlan_network)));
-
- if (!pbuf)
- return -ENOMEM;
-
- pmlmepriv->free_bss_buf = pbuf;
-
- pnetwork = (struct wlan_network *)pbuf;
-
- for (i = 0; i < MAX_BSS_CNT; i++) {
- INIT_LIST_HEAD(&pnetwork->list);
-
- list_add_tail(&pnetwork->list, &pmlmepriv->free_bss_pool.queue);
-
- pnetwork++;
- }
-
- /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
-
- rtw_init_mlme_timer(padapter);
-
- return 0;
-}
-
-void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
-{
- rtw_free_mlme_priv_ie_data(pmlmepriv);
- vfree(pmlmepriv->free_bss_buf);
-}
-
-struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv)
-{
- struct wlan_network *pnetwork;
- struct __queue *free_queue = &pmlmepriv->free_bss_pool;
- struct list_head *plist = NULL;
-
- spin_lock_bh(&free_queue->lock);
-
- if (list_empty(&free_queue->queue)) {
- pnetwork = NULL;
- goto exit;
- }
- plist = (&free_queue->queue)->next;
-
- pnetwork = container_of(plist, struct wlan_network, list);
-
- list_del_init(&pnetwork->list);
-
- pnetwork->network_type = 0;
- pnetwork->fixed = false;
- pnetwork->last_scanned = jiffies;
- pnetwork->aid = 0;
- pnetwork->join_res = 0;
-
- pmlmepriv->num_of_scanned++;
-
-exit:
- spin_unlock_bh(&free_queue->lock);
-
- return pnetwork;
-}
-
-static void rtw_free_network_nolock(struct mlme_priv *pmlmepriv,
- struct wlan_network *pnetwork)
-{
- struct __queue *free_queue = &pmlmepriv->free_bss_pool;
-
- if (!pnetwork)
- return;
- if (pnetwork->fixed)
- return;
- list_del_init(&pnetwork->list);
- list_add_tail(&pnetwork->list, get_list_head(free_queue));
- pmlmepriv->num_of_scanned--;
-}
-
-void rtw_free_network_queue(struct adapter *dev, u8 isfreeall)
-{
-
- _rtw_free_network_queue(dev, isfreeall);
-
-}
-
-/*
- return the wlan_network with the matching addr
-
- Shall be called in atomic context to avoid a possible race condition.
-*/
-struct wlan_network *rtw_find_network(struct __queue *scanned_queue, u8 *addr)
-{
- struct wlan_network *pnetwork = _rtw_find_network(scanned_queue, addr);
-
- return pnetwork;
-}
-
-int rtw_is_same_ibss(struct adapter *adapter, struct wlan_network *pnetwork)
-{
- int ret = true;
- struct security_priv *psecuritypriv = &adapter->securitypriv;
-
- if ((psecuritypriv->dot11PrivacyAlgrthm != _NO_PRIVACY_) &&
- (pnetwork->network.Privacy == 0))
- ret = false;
- else if ((psecuritypriv->dot11PrivacyAlgrthm == _NO_PRIVACY_) &&
- (pnetwork->network.Privacy == 1))
- ret = false;
- else
- ret = true;
- return ret;
-}
-
-static int is_same_ess(struct wlan_bssid_ex *a, struct wlan_bssid_ex *b)
-{
- return (a->Ssid.SsidLength == b->Ssid.SsidLength) &&
- !memcmp(a->Ssid.Ssid, b->Ssid.Ssid, a->Ssid.SsidLength);
-}
-
-int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst)
-{
- u16 s_cap, d_cap;
- __le16 le_scap, le_dcap;
-
- memcpy((u8 *)&le_scap, rtw_get_capability_from_ie(src->IEs), 2);
- memcpy((u8 *)&le_dcap, rtw_get_capability_from_ie(dst->IEs), 2);
-
- s_cap = le16_to_cpu(le_scap);
- d_cap = le16_to_cpu(le_dcap);
-
- return ((src->Ssid.SsidLength == dst->Ssid.SsidLength) &&
- ((!memcmp(src->MacAddress, dst->MacAddress, ETH_ALEN))) &&
- ((!memcmp(src->Ssid.Ssid, dst->Ssid.Ssid, src->Ssid.SsidLength))) &&
- ((s_cap & WLAN_CAPABILITY_IBSS) ==
- (d_cap & WLAN_CAPABILITY_IBSS)) &&
- ((s_cap & WLAN_CAPABILITY_BSS) ==
- (d_cap & WLAN_CAPABILITY_BSS)));
-}
-
-struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue)
-{
- struct list_head *plist, *phead;
- struct wlan_network *pwlan = NULL;
- struct wlan_network *oldest = NULL;
-
- phead = get_list_head(scanned_queue);
-
- plist = phead->next;
-
- while (1) {
- if (phead == plist)
- break;
-
- pwlan = container_of(plist, struct wlan_network, list);
-
- if (!pwlan->fixed) {
- if (!oldest || time_after(oldest->last_scanned, pwlan->last_scanned))
- oldest = pwlan;
- }
-
- plist = plist->next;
- }
-
- return oldest;
-}
-
-void update_network(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
- struct adapter *padapter, bool update_ie)
-{
- long rssi_ori = dst->Rssi;
- u8 sq_smp = src->PhyInfo.SignalQuality;
- u8 ss_final;
- u8 sq_final;
- long rssi_final;
-
- AntDivCompare8188E(padapter, dst, src); /* this will update src.Rssi, need consider again */
-
- /* The rule below is 1/5 for sample value, 4/5 for history value */
- if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) && is_same_network(&padapter->mlmepriv.cur_network.network, src)) {
- /* Take the recvpriv's value for the connected AP */
- ss_final = padapter->recvpriv.signal_strength;
- sq_final = padapter->recvpriv.signal_qual;
- /* the rssi value here is undecorated, and will be used for antenna diversity */
- if (sq_smp != 101) /* from the right channel */
- rssi_final = dst->Rssi; //(src->Rssi+dst->Rssi*4)/5;
- else
- rssi_final = rssi_ori;
- } else {
-// if (sq_smp != 101) { /* from the right channel */
- ss_final = (u32)dst->PhyInfo.SignalStrength; //((u32)(src->PhyInfo.SignalStrength)+(u32)(dst->PhyInfo.SignalStrength)*4)/5;
- sq_final = (u32)dst->PhyInfo.SignalQuality; //((u32)(src->PhyInfo.SignalQuality)+(u32)(dst->PhyInfo.SignalQuality)*4)/5;
- rssi_final = dst->Rssi; //(src->Rssi+dst->Rssi*4)/5;
-// } else {
-// /* bss info not receiving from the right channel, use the original RX signal infos */
-// ss_final = dst->PhyInfo.SignalStrength;
-// sq_final = dst->PhyInfo.SignalQuality;
-// rssi_final = dst->Rssi;
-// }
- }
- if (update_ie) {
- dst->Reserved[0] = src->Reserved[0];
- dst->Reserved[1] = src->Reserved[1];
- memcpy((u8 *)dst, (u8 *)src, get_wlan_bssid_ex_sz(src));
- }
- dst->PhyInfo.SignalStrength = ss_final;
- dst->PhyInfo.SignalQuality = sq_final;
- dst->Rssi = rssi_final;
-
-}
-
-static void update_current_network(struct adapter *adapter, struct wlan_bssid_ex *pnetwork)
-{
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
-
- if (check_fwstate(pmlmepriv, _FW_LINKED) &&
- is_same_network(&pmlmepriv->cur_network.network, pnetwork)) {
- update_network(&pmlmepriv->cur_network.network, pnetwork, adapter, true);
- }
-
-}
-
-u8 rtw_current_antenna(struct adapter *adapter)
-{
- struct hal_data_8188e *haldata = &adapter->haldata;
-
- return haldata->CurAntenna;
-}
-
-/*
-Caller must hold pmlmepriv->lock first.
-*/
-void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *target)
-{
- struct list_head *plist, *phead;
- u32 bssid_ex_sz;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct __queue *queue = &pmlmepriv->scanned_queue;
- struct wlan_network *pnetwork = NULL;
- struct wlan_network *oldest = NULL;
-
- spin_lock_bh(&queue->lock);
- phead = get_list_head(queue);
- plist = phead->next;
-
- while (phead != plist) {
- pnetwork = container_of(plist, struct wlan_network, list);
-
- if (is_same_network(&pnetwork->network, target))
- break;
- if ((oldest == ((struct wlan_network *)0)) ||
- time_after(oldest->last_scanned, pnetwork->last_scanned))
- oldest = pnetwork;
- plist = plist->next;
- }
- /* If we didn't find a match, then get a new network slot to initialize
- * with this beacon's information */
- if (phead == plist) {
- if (list_empty(&pmlmepriv->free_bss_pool.queue)) {
- /* If there are no more slots, expire the oldest */
- pnetwork = oldest;
-
- target->PhyInfo.Optimum_antenna = rtw_current_antenna(adapter);
-
- memcpy(&pnetwork->network, target, get_wlan_bssid_ex_sz(target));
- /* variable initialize */
- pnetwork->fixed = false;
- pnetwork->last_scanned = jiffies;
-
- pnetwork->network_type = 0;
- pnetwork->aid = 0;
- pnetwork->join_res = 0;
-
- /* bss info not receiving from the right channel */
- if (pnetwork->network.PhyInfo.SignalQuality == 101)
- pnetwork->network.PhyInfo.SignalQuality = 0;
- } else {
- /* Otherwise just pull from the free list */
-
- pnetwork = rtw_alloc_network(pmlmepriv); /* will update scan_time */
-
- if (!pnetwork)
- goto exit;
-
- bssid_ex_sz = get_wlan_bssid_ex_sz(target);
- target->Length = bssid_ex_sz;
- target->PhyInfo.Optimum_antenna = rtw_current_antenna(adapter);
- memcpy(&pnetwork->network, target, bssid_ex_sz);
-
- pnetwork->last_scanned = jiffies;
-
- /* bss info not receiving from the right channel */
- if (pnetwork->network.PhyInfo.SignalQuality == 101)
- pnetwork->network.PhyInfo.SignalQuality = 0;
- list_add_tail(&pnetwork->list, &queue->queue);
- }
- } else {
- /* we have an entry and we are going to update it. But this entry may
- * be already expired. In this case we do the same as we found a new
- * net and call the new_net handler
- */
- bool update_ie = true;
-
- pnetwork->last_scanned = jiffies;
-
- /* target->Reserved[0] == 1 means that the scanned network is a bcn frame. */
- /* probe resp(3) > beacon(1) > probe req(2) */
- if ((target->Reserved[0] != 2) &&
- (target->Reserved[0] >= pnetwork->network.Reserved[0]))
- update_ie = true;
- else
- update_ie = false;
- update_network(&pnetwork->network, target, adapter, update_ie);
- }
-
-exit:
- spin_unlock_bh(&queue->lock);
-
-}
-
-static void rtw_add_network(struct adapter *adapter,
- struct wlan_bssid_ex *pnetwork)
-{
-
- rtw_wlan_bssid_ex_remove_p2p_attr(pnetwork, P2P_ATTR_GROUP_INFO);
- update_current_network(adapter, pnetwork);
- rtw_update_scanned_network(adapter, pnetwork);
-
-}
-
-/* select the desired network based on the capability of the (i)bss. */
-/* check items: (1) security */
-/* (2) network_type */
-/* (3) WMM */
-/* (4) HT */
-/* (5) others */
-static bool rtw_is_desired_network(struct adapter *adapter, struct wlan_network *pnetwork)
-{
- struct security_priv *psecuritypriv = &adapter->securitypriv;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- u32 desired_encmode;
- u32 privacy;
-
- /* u8 wps_ie[512]; */
- uint wps_ielen;
-
- int bselected = true;
-
- desired_encmode = psecuritypriv->ndisencryptstatus;
- privacy = pnetwork->network.Privacy;
-
- if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) {
- if (rtw_get_wps_ie(pnetwork->network.IEs + _FIXED_IE_LENGTH_, pnetwork->network.IELength - _FIXED_IE_LENGTH_, NULL, &wps_ielen))
- return true;
- else
- return false;
- }
- if (adapter->registrypriv.wifi_spec == 1) { /* for correct flow of 8021X to do.... */
- u8 *p = NULL;
- uint ie_len = 0;
-
- if ((desired_encmode == Ndis802_11EncryptionDisabled) && (privacy != 0))
- bselected = false;
- if (psecuritypriv->ndisauthtype == Ndis802_11AuthModeWPA2PSK) {
- p = rtw_get_ie(pnetwork->network.IEs + _BEACON_IE_OFFSET_,
- _RSN_IE_2_, &ie_len,
- (pnetwork->network.IELength -
- _BEACON_IE_OFFSET_));
- if (p && ie_len > 0)
- bselected = true;
- else
- bselected = false;
- }
- }
-
- if ((desired_encmode != Ndis802_11EncryptionDisabled) && (privacy == 0))
- bselected = false;
-
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
- if (pnetwork->network.InfrastructureMode != pmlmepriv->cur_network.network.InfrastructureMode)
- bselected = false;
- }
-
- return bselected;
-}
-
-void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
-{
- u32 len;
- struct wlan_bssid_ex *pnetwork;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
-
- pnetwork = (struct wlan_bssid_ex *)pbuf;
-
- len = get_wlan_bssid_ex_sz(pnetwork);
- if (len > (sizeof(struct wlan_bssid_ex)))
- return;
- spin_lock_bh(&pmlmepriv->lock);
-
- /* update IBSS_network's timestamp */
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
- if (!memcmp(&pmlmepriv->cur_network.network.MacAddress, pnetwork->MacAddress, ETH_ALEN)) {
- struct wlan_network *ibss_wlan = NULL;
-
- memcpy(pmlmepriv->cur_network.network.IEs, pnetwork->IEs, 8);
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
- ibss_wlan = rtw_find_network(&pmlmepriv->scanned_queue, pnetwork->MacAddress);
- if (ibss_wlan) {
- memcpy(ibss_wlan->network.IEs, pnetwork->IEs, 8);
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- goto exit;
- }
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- }
- }
-
- /* lock pmlmepriv->lock when you accessing network_q */
- if (!check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) {
- if (pnetwork->Ssid.Ssid[0] == 0)
- pnetwork->Ssid.SsidLength = 0;
- rtw_add_network(adapter, pnetwork);
- }
-
-exit:
-
- spin_unlock_bh(&pmlmepriv->lock);
-}
-
-static void rtw_xmit_schedule(struct adapter *padapter)
-{
- struct xmit_priv *pxmitpriv;
-
- if (!padapter)
- return;
-
- pxmitpriv = &padapter->xmitpriv;
-
- spin_lock_bh(&pxmitpriv->lock);
-
- if (rtw_txframes_pending(padapter))
- tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
-
- spin_unlock_bh(&pxmitpriv->lock);
-}
-
-void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
-{
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
-
- spin_lock_bh(&pmlmepriv->lock);
-
- if (pmlmepriv->wps_probe_req_ie) {
- pmlmepriv->wps_probe_req_ie_len = 0;
- kfree(pmlmepriv->wps_probe_req_ie);
- pmlmepriv->wps_probe_req_ie = NULL;
- }
-
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
- _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
-
- spin_unlock_bh(&pmlmepriv->lock);
-
- del_timer_sync(&pmlmepriv->scan_to_timer);
-
- spin_lock_bh(&pmlmepriv->lock);
- rtw_set_signal_stat_timer(&adapter->recvpriv);
-
- if (pmlmepriv->to_join) {
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
- if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
- set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
-
- if (rtw_select_and_join_from_scanned_queue(pmlmepriv) == _SUCCESS) {
- _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT);
- } else {
- struct wlan_bssid_ex *pdev_network = &adapter->registrypriv.dev_network;
- u8 *pibss = adapter->registrypriv.dev_network.MacAddress;
-
- _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
-
- memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
-
- rtw_update_registrypriv_dev_network(adapter);
- rtw_generate_random_ibss(pibss);
-
- pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE;
-
- rtw_createbss_cmd(adapter);
- pmlmepriv->to_join = false;
- }
- }
- } else {
- int s_ret;
- set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
- pmlmepriv->to_join = false;
- s_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv);
- if (s_ret == _SUCCESS) {
- _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT);
- } else {
- if (rtw_to_roaming(adapter) != 0) {
- if (--pmlmepriv->to_roaming == 0 ||
- rtw_sitesurvey_cmd(adapter, &pmlmepriv->assoc_ssid, 1) != _SUCCESS) {
- rtw_set_roaming(adapter, 0);
- rtw_free_assoc_resources(adapter, 1);
- rtw_indicate_disconnect(adapter);
- } else {
- pmlmepriv->to_join = true;
- }
- } else {
- rtw_indicate_disconnect(adapter);
- }
- _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
- }
- }
- }
-
- indicate_wx_scan_complete_event(adapter);
-
- spin_unlock_bh(&pmlmepriv->lock);
-
- if (check_fwstate(pmlmepriv, _FW_LINKED))
- p2p_ps_wk_cmd(adapter, P2P_PS_SCAN_DONE, 0);
-
- rtw_xmit_schedule(adapter);
-}
-
-static void free_scanqueue(struct mlme_priv *pmlmepriv)
-{
- struct __queue *free_queue = &pmlmepriv->free_bss_pool;
- struct __queue *scan_queue = &pmlmepriv->scanned_queue;
- struct list_head *plist, *phead, *ptemp;
-
- spin_lock_bh(&scan_queue->lock);
- spin_lock_bh(&free_queue->lock);
-
- phead = get_list_head(scan_queue);
- plist = phead->next;
-
- while (plist != phead) {
- ptemp = plist->next;
- list_del_init(plist);
- list_add_tail(plist, &free_queue->queue);
- plist = ptemp;
- pmlmepriv->num_of_scanned--;
- }
-
- spin_unlock_bh(&free_queue->lock);
- spin_unlock_bh(&scan_queue->lock);
-}
-
-/*
-*rtw_free_assoc_resources: the caller has to lock pmlmepriv->lock
-*/
-void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue)
-{
- struct wlan_network *pwlan = NULL;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct sta_priv *pstapriv = &adapter->stapriv;
- struct wlan_network *tgt_network = &pmlmepriv->cur_network;
-
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_AP_STATE)) {
- struct sta_info *psta;
-
- psta = rtw_get_stainfo(&adapter->stapriv, tgt_network->network.MacAddress);
-
- spin_lock_bh(&pstapriv->sta_hash_lock);
- rtw_free_stainfo(adapter, psta);
- spin_unlock_bh(&pstapriv->sta_hash_lock);
- }
-
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE | WIFI_AP_STATE)) {
- struct sta_info *psta;
-
- rtw_free_all_stainfo(adapter);
-
- psta = rtw_get_bcmc_stainfo(adapter);
- spin_lock_bh(&pstapriv->sta_hash_lock);
- rtw_free_stainfo(adapter, psta);
- spin_unlock_bh(&pstapriv->sta_hash_lock);
-
- rtw_init_bcmc_stainfo(adapter);
- }
-
- if (lock_scanned_queue)
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
-
- pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
- if (pwlan)
- pwlan->fixed = false;
-
- if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) && (adapter->stapriv.asoc_sta_count == 1)))
- rtw_free_network_nolock(pmlmepriv, pwlan);
-
- if (lock_scanned_queue)
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- pmlmepriv->key_mask = 0;
-
-}
-
-static struct rt_pmkid_list backup_pmkid[NUM_PMKID_CACHE];
-
-static void rtw_reset_securitypriv(struct adapter *adapter)
-{
- u8 backup_index;
- u8 backup_counter;
- u32 backup_time;
-
- if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
- /* 802.1x */
- /* We have to backup the PMK information for WiFi PMK Caching test item. */
- /* Backup the btkip_countermeasure information. */
- /* When the countermeasure is triggered, the driver has to disconnect from the AP for 60 seconds. */
- memcpy(&backup_pmkid[0], &adapter->securitypriv.PMKIDList[0], sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
- backup_index = adapter->securitypriv.PMKIDIndex;
- backup_counter = adapter->securitypriv.btkip_countermeasure;
- backup_time = adapter->securitypriv.btkip_countermeasure_time;
- memset((unsigned char *)&adapter->securitypriv, 0, sizeof(struct security_priv));
-
- /* Restore the PMK information to securitypriv structure for the following connection. */
- memcpy(&adapter->securitypriv.PMKIDList[0],
- &backup_pmkid[0],
- sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
- adapter->securitypriv.PMKIDIndex = backup_index;
- adapter->securitypriv.btkip_countermeasure = backup_counter;
- adapter->securitypriv.btkip_countermeasure_time = backup_time;
- adapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
- adapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;
- } else {
- /* reset values in securitypriv */
- struct security_priv *psec_priv = &adapter->securitypriv;
-
- psec_priv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
- psec_priv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
- psec_priv->dot11PrivacyKeyIndex = 0;
- psec_priv->dot118021XGrpPrivacy = _NO_PRIVACY_;
- psec_priv->dot118021XGrpKeyid = 1;
- psec_priv->ndisauthtype = Ndis802_11AuthModeOpen;
- psec_priv->ndisencryptstatus = Ndis802_11WEPDisabled;
- }
-}
-
-/*
-*rtw_indicate_connect: the caller has to lock pmlmepriv->lock
-*/
-void rtw_indicate_connect(struct adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- pmlmepriv->to_join = false;
-
- if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
- set_fwstate(pmlmepriv, _FW_LINKED);
-
- rtw_led_control(padapter, LED_CTL_LINK);
-
- rtw_indicate_wx_assoc_event(padapter);
- netif_carrier_on(padapter->pnetdev);
- if (padapter->pid[2] != 0)
- rtw_signal_process(padapter->pid[2], SIGALRM);
- }
-
- pmlmepriv->to_roaming = 0;
-}
-
-/*
-*rtw_indicate_disconnect: the caller has to lock pmlmepriv->lock
-*/
-void rtw_indicate_disconnect(struct adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING | WIFI_UNDER_WPS);
-
- if (pmlmepriv->to_roaming > 0)
- _clr_fwstate_(pmlmepriv, _FW_LINKED);
-
- if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) ||
- (pmlmepriv->to_roaming <= 0)) {
- /* Do it first for tx broadcast pkt after disconnection issue! */
- netif_carrier_off(padapter->pnetdev);
-
- rtw_indicate_wx_disassoc_event(padapter);
- rtw_reset_securitypriv(padapter);
-
- _clr_fwstate_(pmlmepriv, _FW_LINKED);
- rtw_led_control(padapter, LED_CTL_NO_LINK);
- }
- p2p_ps_wk_cmd(padapter, P2P_PS_DISABLE, 1);
-
- rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_DISCONNECT, 1);
-
-}
-
-inline void rtw_indicate_scan_done(struct adapter *padapter)
-{
- indicate_wx_scan_complete_event(padapter);
-}
-
-static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, struct wlan_network *pnetwork)
-{
- int i;
- struct sta_info *bmc_sta, *psta = NULL;
- struct recv_reorder_ctrl *preorder_ctrl;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- psta = rtw_get_stainfo(pstapriv, pnetwork->network.MacAddress);
- if (!psta)
- psta = rtw_alloc_stainfo(pstapriv, pnetwork->network.MacAddress);
-
- if (psta) { /* update ptarget_sta */
- psta->aid = pnetwork->join_res;
- psta->mac_id = 0;
- /* sta mode */
- rtl8188e_SetHalODMVar(padapter, psta, true);
- /* security related */
- if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
- padapter->securitypriv.binstallGrpkey = false;
- padapter->securitypriv.busetkipkey = false;
- padapter->securitypriv.bgrpkey_handshake = false;
- psta->ieee8021x_blocked = true;
- psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
- memset((u8 *)&psta->dot118021x_UncstKey, 0, sizeof(union Keytype));
- memset((u8 *)&psta->dot11tkiprxmickey, 0, sizeof(union Keytype));
- memset((u8 *)&psta->dot11tkiptxmickey, 0, sizeof(union Keytype));
- memset((u8 *)&psta->dot11txpn, 0, sizeof(union pn48));
- memset((u8 *)&psta->dot11rxpn, 0, sizeof(union pn48));
- }
- /* When doing WPS, the wps_ie_len won't be 0 */
- /* And the Wi-Fi driver shouldn't allow the data packet to be transmitted. */
- if (padapter->securitypriv.wps_ie_len != 0) {
- psta->ieee8021x_blocked = true;
- padapter->securitypriv.wps_ie_len = 0;
- }
- /* for A-MPDU Rx reordering buffer control for bmc_sta & sta_info */
- /* if A-MPDU Rx is enabled, resetting rx_ordering_ctrl wstart_b(indicate_seq) to default value = 0xffff */
- /* todo: check if AP can send A-MPDU packets */
- for (i = 0; i < 16; i++) {
- /* preorder_ctrl = &precvpriv->recvreorder_ctrl[i]; */
- preorder_ctrl = &psta->recvreorder_ctrl[i];
- preorder_ctrl->enable = false;
- preorder_ctrl->indicate_seq = 0xffff;
- preorder_ctrl->wend_b = 0xffff;
- preorder_ctrl->wsize_b = 64;/* max_ampdu_sz; ex. 32(kbytes) -> wsize_b = 32 */
- }
- bmc_sta = rtw_get_bcmc_stainfo(padapter);
- if (bmc_sta) {
- for (i = 0; i < 16; i++) {
- /* preorder_ctrl = &precvpriv->recvreorder_ctrl[i]; */
- preorder_ctrl = &bmc_sta->recvreorder_ctrl[i];
- preorder_ctrl->enable = false;
- preorder_ctrl->indicate_seq = 0xffff;
- preorder_ctrl->wend_b = 0xffff;
- preorder_ctrl->wsize_b = 64;/* max_ampdu_sz; ex. 32(kbytes) -> wsize_b = 32 */
- }
- }
- /* misc. */
- update_sta_info(padapter, psta);
- }
- return psta;
-}
-
-/* pnetwork: returns from rtw_joinbss_event_callback */
-/* ptarget_wlan: found from scanned_queue */
-static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_network *ptarget_wlan, struct wlan_network *pnetwork)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_network *cur_network = &pmlmepriv->cur_network;
-
- /* why not use ptarget_wlan?? */
- memcpy(&cur_network->network, &pnetwork->network, pnetwork->network.Length);
- /* some IEs in pnetwork is wrong, so we should use ptarget_wlan IEs */
- cur_network->network.IELength = ptarget_wlan->network.IELength;
- memcpy(&cur_network->network.IEs[0], &ptarget_wlan->network.IEs[0], MAX_IE_SZ);
-
- cur_network->aid = pnetwork->join_res;
-
- rtw_set_signal_stat_timer(&padapter->recvpriv);
- padapter->recvpriv.signal_strength = ptarget_wlan->network.PhyInfo.SignalStrength;
- padapter->recvpriv.signal_qual = ptarget_wlan->network.PhyInfo.SignalQuality;
- /* the ptarget_wlan->network.Rssi is raw data, we use ptarget_wlan->network.PhyInfo.SignalStrength instead (already scaled) */
- padapter->recvpriv.rssi = translate_percentage_to_dbm(ptarget_wlan->network.PhyInfo.SignalStrength);
- rtw_set_signal_stat_timer(&padapter->recvpriv);
-
- /* update fw_state will clr _FW_UNDER_LINKING here indirectly */
- switch (pnetwork->network.InfrastructureMode) {
- case Ndis802_11Infrastructure:
- if (pmlmepriv->fw_state & WIFI_UNDER_WPS)
- pmlmepriv->fw_state = WIFI_STATION_STATE | WIFI_UNDER_WPS;
- else
- pmlmepriv->fw_state = WIFI_STATION_STATE;
- break;
- case Ndis802_11IBSS:
- pmlmepriv->fw_state = WIFI_ADHOC_STATE;
- break;
- default:
- pmlmepriv->fw_state = WIFI_NULL_STATE;
- break;
- }
-
- rtw_update_ht_cap(padapter, cur_network->network.IEs, cur_network->network.IELength);
-}
-
-/* Notes: the function could be > passive_level (the same context as Rx tasklet) */
-/* pnetwork: returns from rtw_joinbss_event_callback */
-/* ptarget_wlan: found from scanned_queue */
-/* if join_res > 0, for (fw_state == WIFI_STATION_STATE), we check if "ptarget_sta" & "ptarget_wlan" exist. */
-/* if join_res > 0, for (fw_state == WIFI_ADHOC_STATE), we only check if "ptarget_wlan" exist. */
-/* if join_res > 0, update "cur_network->network" from "pnetwork->network" if (ptarget_wlan != NULL). */
-
-void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
-{
- struct sta_info *ptarget_sta = NULL, *pcur_sta = NULL;
- struct sta_priv *pstapriv = &adapter->stapriv;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct wlan_network *pnetwork = (struct wlan_network *)pbuf;
- struct wlan_network *cur_network = &pmlmepriv->cur_network;
- struct wlan_network *pcur_wlan = NULL, *ptarget_wlan = NULL;
- unsigned int the_same_macaddr = false;
-
- the_same_macaddr = !memcmp(pnetwork->network.MacAddress, cur_network->network.MacAddress, ETH_ALEN);
-
- pnetwork->network.Length = get_wlan_bssid_ex_sz(&pnetwork->network);
- if (pnetwork->network.Length > sizeof(struct wlan_bssid_ex))
- return;
-
- spin_lock_bh(&pmlmepriv->lock);
-
- if (pnetwork->join_res > 0) {
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
- if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) {
- /* s1. find ptarget_wlan */
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- if (the_same_macaddr) {
- ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
- } else {
- pcur_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
- if (pcur_wlan)
- pcur_wlan->fixed = false;
-
- pcur_sta = rtw_get_stainfo(pstapriv, cur_network->network.MacAddress);
- if (pcur_sta) {
- spin_lock_bh(&pstapriv->sta_hash_lock);
- rtw_free_stainfo(adapter, pcur_sta);
- spin_unlock_bh(&pstapriv->sta_hash_lock);
- }
-
- ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, pnetwork->network.MacAddress);
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- if (ptarget_wlan)
- ptarget_wlan->fixed = true;
- }
- }
- } else {
- ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, pnetwork->network.MacAddress);
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- if (ptarget_wlan)
- ptarget_wlan->fixed = true;
- }
- }
-
- /* s2. update cur_network */
- if (ptarget_wlan) {
- rtw_joinbss_update_network(adapter, ptarget_wlan, pnetwork);
- } else {
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- goto ignore_joinbss_callback;
- }
-
- /* s3. find ptarget_sta & update ptarget_sta after update cur_network only for station mode */
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- ptarget_sta = rtw_joinbss_update_stainfo(adapter, pnetwork);
- if (!ptarget_sta) {
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- goto ignore_joinbss_callback;
- }
- }
-
- /* s4. indicate connect */
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- pmlmepriv->cur_network_scanned = ptarget_wlan;
- rtw_indicate_connect(adapter);
- }
-
- spin_unlock_bh(&pmlmepriv->lock);
- /* s5. Cancel assoc_timer */
- del_timer_sync(&pmlmepriv->assoc_timer);
- spin_lock_bh(&pmlmepriv->lock);
- } else {
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- goto ignore_joinbss_callback;
- }
-
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
-
- } else if (pnetwork->join_res == -4) {
- rtw_reset_securitypriv(adapter);
- _set_timer(&pmlmepriv->assoc_timer, 1);
-
- if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
- _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
- } else { /* if join_res < 0 (join fails), then try again */
- _set_timer(&pmlmepriv->assoc_timer, 1);
- _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
- }
-
-ignore_joinbss_callback:
- spin_unlock_bh(&pmlmepriv->lock);
-}
-
-void rtw_joinbss_event_callback(struct adapter *adapter, u8 *pbuf)
-{
- struct wlan_network *pnetwork = (struct wlan_network *)pbuf;
-
- mlmeext_joinbss_event_callback(adapter, pnetwork->join_res);
-
- rtw_xmit_schedule(adapter);
-}
-
-void rtw_set_max_rpt_macid(struct adapter *adapter, u8 macid)
-{
- rtw_write8(adapter, REG_TX_RPT_CTRL + 1, macid + 1);
-}
-
-static u8 search_max_mac_id(struct adapter *padapter)
-{
- u8 mac_id;
- u8 aid;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- for (aid = (pstapriv->max_num_sta); aid > 0; aid--) {
- if (pstapriv->sta_aid[aid - 1])
- break;
- }
- mac_id = aid + 1;
- } else {
- /* adhoc id = 31~2 */
- for (mac_id = (NUM_STA - 1); mac_id >= IBSS_START_MAC_ID; mac_id--) {
- if (pmlmeinfo->FW_sta_info[mac_id].status == 1)
- break;
- }
- }
- return mac_id;
-}
-
-/* FOR AP, AD-HOC mode */
-void rtw_sta_media_status_rpt(struct adapter *adapter, struct sta_info *psta,
- u32 mstatus)
-{
- u16 media_status_rpt;
- u8 macid;
-
- if (!psta)
- return;
-
- macid = search_max_mac_id(adapter);
- rtw_set_max_rpt_macid(adapter, macid);
-
- /* MACID|OPMODE:1 connect */
- media_status_rpt = (u16)((psta->mac_id << 8) | mstatus);
- rtl8188e_set_FwMediaStatus_cmd(adapter, media_status_rpt);
-}
-
-void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
-{
- struct sta_info *psta;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct stassoc_event *pstassoc = (struct stassoc_event *)pbuf;
- struct wlan_network *cur_network = &pmlmepriv->cur_network;
- struct wlan_network *ptarget_wlan = NULL;
-
- if (!rtw_access_ctrl(adapter, pstassoc->macaddr))
- return;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- psta = rtw_get_stainfo(&adapter->stapriv, pstassoc->macaddr);
- if (psta)
- rtw_indicate_sta_assoc_event(adapter, psta);
- return;
- }
- /* for AD-HOC mode */
- psta = rtw_get_stainfo(&adapter->stapriv, pstassoc->macaddr);
- if (psta)
- /* the sta is already in sta_info_queue => do nothing */
- return; /* (the driver has already received this event and the fw has not yet set the key to CAM_ENTRY) */
- psta = rtw_alloc_stainfo(&adapter->stapriv, pstassoc->macaddr);
- if (!psta)
- return;
- /* to do: init sta_info variable */
- psta->qos_option = 0;
- psta->mac_id = (uint)pstassoc->cam_id;
-
- /* for ad-hoc mode */
- rtl8188e_SetHalODMVar(adapter, psta, true);
- rtw_sta_media_status_rpt(adapter, psta, 1);
- if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
- psta->dot118021XPrivacy = adapter->securitypriv.dot11PrivacyAlgrthm;
- psta->ieee8021x_blocked = false;
- spin_lock_bh(&pmlmepriv->lock);
- if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) ||
- (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE))) {
- if (adapter->stapriv.asoc_sta_count == 2) {
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
- ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
- pmlmepriv->cur_network_scanned = ptarget_wlan;
- if (ptarget_wlan)
- ptarget_wlan->fixed = true;
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- /* a sta + bc/mc_stainfo (not Ibss_stainfo) */
- rtw_indicate_connect(adapter);
- }
- }
- spin_unlock_bh(&pmlmepriv->lock);
- mlmeext_sta_add_event_callback(adapter, psta);
-}
-
-void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
-{
- int mac_id = -1;
- struct sta_info *psta;
- struct wlan_network *pwlan = NULL;
- struct wlan_bssid_ex *pdev_network = NULL;
- u8 *pibss = NULL;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct stadel_event *pstadel = (struct stadel_event *)pbuf;
- struct sta_priv *pstapriv = &adapter->stapriv;
- struct wlan_network *tgt_network = &pmlmepriv->cur_network;
-
- psta = rtw_get_stainfo(&adapter->stapriv, pstadel->macaddr);
- if (psta)
- mac_id = psta->mac_id;
- else
- mac_id = pstadel->mac_id;
-
- if (mac_id >= 0) {
- u16 media_status;
- media_status = (mac_id << 8) | 0; /* MACID|OPMODE:0 means disconnect */
- /* for STA, AP, ADHOC mode, report disconnect status to FW */
- rtl8188e_set_FwMediaStatus_cmd(adapter, media_status);
- }
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
- return;
-
- mlmeext_sta_del_event_callback(adapter);
-
- spin_lock_bh(&pmlmepriv->lock);
-
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- if (adapter->registrypriv.wifi_spec == 1)
- rtw_set_roaming(adapter, 0); /* don't roam */
- else if (rtw_to_roaming(adapter) > 0)
- pmlmepriv->to_roaming--; /* this stadel_event is caused by roaming, decrease to_roaming */
- else if (rtw_to_roaming(adapter) == 0)
- rtw_set_roaming(adapter,
- adapter->registrypriv.max_roaming_times);
-
- if (*((unsigned short *)(pstadel->rsvd)) != WLAN_REASON_EXPIRATION_CHK)
- rtw_set_roaming(adapter, 0); /* don't roam */
-
- rtw_free_uc_swdec_pending_queue(adapter);
-
- rtw_free_assoc_resources(adapter, 1);
- rtw_indicate_disconnect(adapter);
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
- /* remove the network entry in scanned_queue */
- pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
- if (pwlan) {
- pwlan->fixed = false;
- rtw_free_network_nolock(pmlmepriv, pwlan);
- }
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- _rtw_roaming(adapter, tgt_network);
- }
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
- spin_lock_bh(&pstapriv->sta_hash_lock);
- rtw_free_stainfo(adapter, psta);
- spin_unlock_bh(&pstapriv->sta_hash_lock);
-
- if (adapter->stapriv.asoc_sta_count == 1) { /* a sta + bc/mc_stainfo (not Ibss_stainfo) */
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
- /* free old ibss network */
- pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
- if (pwlan) {
- pwlan->fixed = false;
- rtw_free_network_nolock(pmlmepriv, pwlan);
- }
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- /* re-create ibss */
- pdev_network = &adapter->registrypriv.dev_network;
- pibss = adapter->registrypriv.dev_network.MacAddress;
-
- memcpy(pdev_network, &tgt_network->network, get_wlan_bssid_ex_sz(&tgt_network->network));
-
- memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
-
- rtw_update_registrypriv_dev_network(adapter);
-
- rtw_generate_random_ibss(pibss);
-
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
- set_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
- _clr_fwstate_(pmlmepriv, WIFI_ADHOC_STATE);
- }
-
- rtw_createbss_cmd(adapter);
- }
- }
- spin_unlock_bh(&pmlmepriv->lock);
-
-}
-
-/*
-* _rtw_join_timeout_handler - Timeout/failure handler for CMD JoinBss
-* @adapter: pointer to struct adapter structure
-*/
-void _rtw_join_timeout_handler (struct adapter *adapter)
-{
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- int do_join_r;
-
- if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
- return;
-
- spin_lock_irq(&pmlmepriv->lock);
-
- if (rtw_to_roaming(adapter) > 0) { /* join timeout caused by roaming */
- while (1) {
- pmlmepriv->to_roaming--;
- if (rtw_to_roaming(adapter) != 0) { /* try another */
- do_join_r = rtw_do_join(adapter);
- if (do_join_r != _SUCCESS)
- continue;
- break;
- } else {
- rtw_indicate_disconnect(adapter);
- break;
- }
- }
- } else {
- rtw_indicate_disconnect(adapter);
- free_scanqueue(pmlmepriv);/* */
- }
- spin_unlock_irq(&pmlmepriv->lock);
-
-}
-
-/*
-* rtw_scan_timeout_handler - Timeout/Failure handler for CMD SiteSurvey
-* @adapter: pointer to struct adapter structure
-*/
-void rtw_scan_timeout_handler (struct adapter *adapter)
-{
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
-
- spin_lock_bh(&pmlmepriv->lock);
- _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
- spin_unlock_bh(&pmlmepriv->lock);
- rtw_indicate_scan_done(adapter);
-}
-
-static void rtw_auto_scan_handler(struct adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- /* auto site survey per 60sec */
- if (pmlmepriv->scan_interval > 0) {
- pmlmepriv->scan_interval--;
- if (pmlmepriv->scan_interval == 0) {
- rtw_set_802_11_bssid_list_scan(padapter, NULL, 0);
- pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
- }
- }
-}
-
-void rtw_dynamic_check_timer_handlder(struct adapter *adapter)
-{
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct registry_priv *pregistrypriv = &adapter->registrypriv;
-
- if (!adapter)
- return;
- if (!adapter->hw_init_completed)
- return;
- if ((adapter->bDriverStopped) || (adapter->bSurpriseRemoved))
- return;
- if (adapter->net_closed)
- return;
- rtw_dynamic_chk_wk_cmd(adapter);
-
- if (pregistrypriv->wifi_spec == 1) {
- struct wifidirect_info *pwdinfo = &adapter->wdinfo;
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
- /* auto site survey */
- rtw_auto_scan_handler(adapter);
- }
- }
-
- rcu_read_lock();
-
- if (rcu_dereference(adapter->pnetdev->rx_handler_data) &&
- check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_ADHOC_STATE)) {
- /* expire NAT2.5 entry */
- nat25_db_expire(adapter);
-
- if (adapter->pppoe_connection_in_progress > 0)
- adapter->pppoe_connection_in_progress--;
-
- /* because rtw_dynamic_check_timer_handlder() is called every 2 seconds */
- if (adapter->pppoe_connection_in_progress > 0)
- adapter->pppoe_connection_in_progress--;
- }
-
- rcu_read_unlock();
-}
-
-#define RTW_SCAN_RESULT_EXPIRE 2000
-
-/*
-* Select a new join candidate from the original @param candidate and @param competitor
-* @return true: candidate is updated
-* @return false: candidate is not updated
-*/
-static int rtw_check_join_candidate(struct mlme_priv *pmlmepriv
- , struct wlan_network **candidate, struct wlan_network *competitor)
-{
- int updated = false;
- struct adapter *adapter = container_of(pmlmepriv, struct adapter, mlmepriv);
- unsigned long scan_res_expire;
-
- /* check bssid, if needed */
- if (pmlmepriv->assoc_by_bssid) {
- if (memcmp(competitor->network.MacAddress, pmlmepriv->assoc_bssid, ETH_ALEN))
- goto exit;
- }
-
- /* check ssid, if needed */
- if (pmlmepriv->assoc_ssid.SsidLength) {
- if (competitor->network.Ssid.SsidLength != pmlmepriv->assoc_ssid.SsidLength ||
- memcmp(competitor->network.Ssid.Ssid, pmlmepriv->assoc_ssid.Ssid, pmlmepriv->assoc_ssid.SsidLength))
- goto exit;
- }
-
- if (!rtw_is_desired_network(adapter, competitor))
- goto exit;
-
- scan_res_expire = competitor->last_scanned + msecs_to_jiffies(RTW_SCAN_RESULT_EXPIRE);
- if (rtw_to_roaming(adapter) > 0) {
- if (time_after(jiffies, scan_res_expire) ||
- !is_same_ess(&competitor->network, &pmlmepriv->cur_network.network))
- goto exit;
- }
-
- if (!*candidate || (*candidate)->network.Rssi < competitor->network.Rssi) {
- *candidate = competitor;
- updated = true;
- }
-
-exit:
- return updated;
-}
-
-/*
-Calling context:
-The caller of this sub-routine will be in a critical section.
-The caller must hold the following spinlock:
-pmlmepriv->lock
-*/
-
-int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
-{
- int ret;
- struct list_head *phead;
- struct adapter *adapter;
- struct __queue *queue = &pmlmepriv->scanned_queue;
- struct wlan_network *pnetwork = NULL;
- struct wlan_network *candidate = NULL;
-
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
- phead = get_list_head(queue);
- adapter = (struct adapter *)pmlmepriv->nic_hdl;
- pmlmepriv->pscanned = phead->next;
- while (phead != pmlmepriv->pscanned) {
- pnetwork = container_of(pmlmepriv->pscanned, struct wlan_network, list);
- pmlmepriv->pscanned = pmlmepriv->pscanned->next;
- rtw_check_join_candidate(pmlmepriv, &candidate, pnetwork);
- }
- if (!candidate) {
- ret = _FAIL;
- goto exit;
- }
-
- /* check for situation of _FW_LINKED */
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- rtw_disassoc_cmd(adapter, 0, true);
- rtw_indicate_disconnect(adapter);
- rtw_free_assoc_resources(adapter, 0);
- }
-
- ret = rtw_joinbss_cmd(adapter, candidate);
-
-exit:
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
-
- return ret;
-}
-
-int rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
-{
- struct cmd_obj *pcmd;
- struct setauth_parm *psetauthparm;
- struct cmd_priv *pcmdpriv = &adapter->cmdpriv;
- int res = _SUCCESS;
-
- pcmd = kzalloc(sizeof(*pcmd), GFP_KERNEL);
- if (!pcmd) {
- res = _FAIL; /* try again */
- goto exit;
- }
-
- psetauthparm = kzalloc(sizeof(*psetauthparm), GFP_KERNEL);
- if (!psetauthparm) {
- kfree(pcmd);
- res = _FAIL;
- goto exit;
- }
- psetauthparm->mode = (unsigned char)psecuritypriv->dot11AuthAlgrthm;
- pcmd->cmdcode = _SetAuth_CMD_;
- pcmd->parmbuf = (unsigned char *)psetauthparm;
- pcmd->cmdsz = (sizeof(struct setauth_parm));
- pcmd->rsp = NULL;
- pcmd->rspsz = 0;
- INIT_LIST_HEAD(&pcmd->list);
- res = rtw_enqueue_cmd(pcmdpriv, pcmd);
-exit:
-
- return res;
-}
-
-int rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, int keyid, u8 set_tx)
-{
- u8 keylen;
- struct cmd_obj *pcmd;
- struct setkey_parm *psetkeyparm;
- struct cmd_priv *pcmdpriv = &adapter->cmdpriv;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- int res = _SUCCESS;
-
- pcmd = kzalloc(sizeof(*pcmd), GFP_KERNEL);
- if (!pcmd) {
- res = _FAIL; /* try again */
- goto exit;
- }
- psetkeyparm = kzalloc(sizeof(*psetkeyparm), GFP_KERNEL);
- if (!psetkeyparm) {
- kfree(pcmd);
- res = _FAIL;
- goto exit;
- }
-
- if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
- psetkeyparm->algorithm = (unsigned char)psecuritypriv->dot118021XGrpPrivacy;
- else
- psetkeyparm->algorithm = (u8)psecuritypriv->dot11PrivacyAlgrthm;
- psetkeyparm->keyid = (u8)keyid;/* 0~3 */
- psetkeyparm->set_tx = set_tx;
- pmlmepriv->key_mask |= BIT(psetkeyparm->keyid);
-
- switch (psetkeyparm->algorithm) {
- case _WEP40_:
- keylen = 5;
- memcpy(&psetkeyparm->key[0], &psecuritypriv->dot11DefKey[keyid].skey[0], keylen);
- break;
- case _WEP104_:
- keylen = 13;
- memcpy(&psetkeyparm->key[0], &psecuritypriv->dot11DefKey[keyid].skey[0], keylen);
- break;
- case _TKIP_:
- keylen = 16;
- memcpy(&psetkeyparm->key, &psecuritypriv->dot118021XGrpKey[keyid], keylen);
- psetkeyparm->grpkey = 1;
- break;
- case _AES_:
- keylen = 16;
- memcpy(&psetkeyparm->key, &psecuritypriv->dot118021XGrpKey[keyid], keylen);
- psetkeyparm->grpkey = 1;
- break;
- default:
- kfree(psetkeyparm);
- kfree(pcmd);
- res = _FAIL;
- goto exit;
- }
- pcmd->cmdcode = _SetKey_CMD_;
- pcmd->parmbuf = (u8 *)psetkeyparm;
- pcmd->cmdsz = (sizeof(struct setkey_parm));
- pcmd->rsp = NULL;
- pcmd->rspsz = 0;
- INIT_LIST_HEAD(&pcmd->list);
- res = rtw_enqueue_cmd(pcmdpriv, pcmd);
-exit:
- return res;
-}
-
-/* adjust IEs for rtw_joinbss_cmd in WMM */
-int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len, uint initial_out_len)
-{
- unsigned int ielength = 0;
- unsigned int i, j;
-
- i = 12; /* after the fixed IE */
- while (i < in_len) {
- ielength = initial_out_len;
-
- if (in_ie[i] == 0xDD && in_ie[i + 2] == 0x00 && in_ie[i + 3] == 0x50 && in_ie[i + 4] == 0xF2 && in_ie[i + 5] == 0x02 && i + 5 < in_len) {
- /* WMM element ID and OUI */
- /* Append WMM IE to the last index of out_ie */
-
- for (j = i; j < i + 9; j++) {
- out_ie[ielength] = in_ie[j];
- ielength++;
- }
- out_ie[initial_out_len + 1] = 0x07;
- out_ie[initial_out_len + 6] = 0x00;
- out_ie[initial_out_len + 8] = 0x00;
- break;
- }
- i += (in_ie[i + 1] + 2); /* to the next IE element */
- }
- return ielength;
-}
-
-/* */
-/* Search by BSSID, */
-/* Return Value: */
-/* -1 :if there is no pre-auth key in the table */
-/* >= 0 :if there is pre-auth key, and return the entry id */
-/* */
-/* */
-
-static int SecIsInPMKIDList(struct adapter *Adapter, u8 *bssid)
-{
- struct security_priv *p = &Adapter->securitypriv;
- int i;
-
- for (i = 0; i < NUM_PMKID_CACHE; i++)
- if (p->PMKIDList[i].bUsed && !memcmp(p->PMKIDList[i].Bssid, bssid, ETH_ALEN))
- return i;
- return -1;
-}
-
-/* */
-/* Check the RSN IE length */
-/* If the RSN IE length <= 20, the RSN IE didn't include the PMKID information */
-/* 0-11th element in the array are the fixed IE */
-/* Elements 0-11 in the array are the fixed IEs */
-/* 13th element in the array is the IE length */
-/* */
-
-static int rtw_append_pmkid(struct adapter *Adapter, int iEntry, u8 *ie, uint ie_len)
-{
- struct security_priv *psecuritypriv = &Adapter->securitypriv;
-
- if (ie[13] <= 20) {
- /* The RSN IE didn't include the PMK ID, append the PMK information */
- ie[ie_len] = 1;
- ie_len++;
- ie[ie_len] = 0; /* PMKID count = 0x0100 */
- ie_len++;
- memcpy(&ie[ie_len], &psecuritypriv->PMKIDList[iEntry].PMKID, 16);
-
- ie_len += 16;
- ie[13] += 18;/* PMKID length = 2+16 */
- }
- return ie_len;
-}
-
-static void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
-{
- uint len;
- u8 *buff, *p, i;
- union iwreq_data wrqu;
-
- buff = NULL;
- if (authmode == _WPA_IE_ID_) {
- buff = kzalloc(IW_CUSTOM_MAX, GFP_ATOMIC);
- if (!buff)
- return;
- p = buff;
- p += sprintf(p, "ASSOCINFO(ReqIEs =");
- len = sec_ie[1] + 2;
- len = (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX;
- for (i = 0; i < len; i++)
- p += sprintf(p, "%02x", sec_ie[i]);
- p += sprintf(p, ")");
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = p - buff;
- wrqu.data.length = (wrqu.data.length < IW_CUSTOM_MAX) ?
- wrqu.data.length : IW_CUSTOM_MAX;
- wireless_send_event(adapter->pnetdev, IWEVCUSTOM, &wrqu, buff);
- kfree(buff);
- }
-}
-
-int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len)
-{
- u8 authmode = 0;
- uint ielength;
- int iEntry;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct security_priv *psecuritypriv = &adapter->securitypriv;
- uint ndisauthmode = psecuritypriv->ndisauthtype;
-
- /* copy fixed ie only */
- memcpy(out_ie, in_ie, 12);
- ielength = 12;
- if ((ndisauthmode == Ndis802_11AuthModeWPA) ||
- (ndisauthmode == Ndis802_11AuthModeWPAPSK))
- authmode = _WPA_IE_ID_;
- if ((ndisauthmode == Ndis802_11AuthModeWPA2) ||
- (ndisauthmode == Ndis802_11AuthModeWPA2PSK))
- authmode = _WPA2_IE_ID_;
-
- if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) {
- memcpy(out_ie + ielength, psecuritypriv->wps_ie, psecuritypriv->wps_ie_len);
-
- ielength += psecuritypriv->wps_ie_len;
- } else if ((authmode == _WPA_IE_ID_) || (authmode == _WPA2_IE_ID_)) {
- /* copy RSN or SSN */
- memcpy(&out_ie[ielength], &psecuritypriv->supplicant_ie[0], psecuritypriv->supplicant_ie[1] + 2);
- ielength += psecuritypriv->supplicant_ie[1] + 2;
- rtw_report_sec_ie(adapter, authmode, psecuritypriv->supplicant_ie);
- }
-
- iEntry = SecIsInPMKIDList(adapter, pmlmepriv->assoc_bssid);
- if (iEntry < 0) {
- return ielength;
- } else {
- if (authmode == _WPA2_IE_ID_)
- ielength = rtw_append_pmkid(adapter, iEntry, out_ie, ielength);
- }
-
- return ielength;
-}
-
-void rtw_init_registrypriv_dev_network(struct adapter *adapter)
-{
- struct registry_priv *pregistrypriv = &adapter->registrypriv;
- struct eeprom_priv *peepriv = &adapter->eeprompriv;
- struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
- u8 *myhwaddr = myid(peepriv);
-
- memcpy(pdev_network->MacAddress, myhwaddr, ETH_ALEN);
-
- memcpy(&pdev_network->Ssid, &pregistrypriv->ssid, sizeof(struct ndis_802_11_ssid));
-
- pdev_network->Configuration.Length = sizeof(struct ndis_802_11_config);
- pdev_network->Configuration.BeaconPeriod = 100;
- pdev_network->Configuration.FHConfig.Length = 0;
- pdev_network->Configuration.FHConfig.HopPattern = 0;
- pdev_network->Configuration.FHConfig.HopSet = 0;
- pdev_network->Configuration.FHConfig.DwellTime = 0;
-
-}
-
-void rtw_update_registrypriv_dev_network(struct adapter *adapter)
-{
- int sz = 0;
- struct registry_priv *pregistrypriv = &adapter->registrypriv;
- struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
- struct security_priv *psecuritypriv = &adapter->securitypriv;
- struct wlan_network *cur_network = &adapter->mlmepriv.cur_network;
-
- pdev_network->Privacy = (psecuritypriv->dot11PrivacyAlgrthm > 0 ? 1 : 0); /* adhoc no 802.1x */
-
- pdev_network->Rssi = 0;
-
- pdev_network->Configuration.DSConfig = (pregistrypriv->channel);
-
- if (cur_network->network.InfrastructureMode == Ndis802_11IBSS)
- pdev_network->Configuration.ATIMWindow = (0);
-
- pdev_network->InfrastructureMode = (cur_network->network.InfrastructureMode);
-
- /* 1. Supported rates */
- /* 2. IE */
-
- sz = rtw_generate_ie(pregistrypriv);
- pdev_network->IELength = sz;
- pdev_network->Length = get_wlan_bssid_ex_sz((struct wlan_bssid_ex *)pdev_network);
-
- /* notes: translate IELength & Length after assign the Length to cmdsz in createbss_cmd(); */
- /* pdev_network->IELength = cpu_to_le32(sz); */
-
-}
-
-static void rtw_set_threshold(struct adapter *adapter)
-{
- struct mlme_priv *mlmepriv = &adapter->mlmepriv;
- struct ht_priv *htpriv = &mlmepriv->htpriv;
-
- if (htpriv->ht_option && adapter->registrypriv.wifi_spec != 1) {
- /* validate usb rx aggregation, use init value. */
- rtw_write8(adapter, REG_RXDMA_AGG_PG_TH, USB_RXAGG_PAGE_COUNT);
- } else {
- /* invalidate usb rx aggregation */
- rtw_write8(adapter, REG_RXDMA_AGG_PG_TH, 1);
- }
-}
-
-/* the function is at passive_level */
-void rtw_joinbss_reset(struct adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct ht_priv *phtpriv = &pmlmepriv->htpriv;
-
- /* todo: if you want to do something io/reg/hw setting before join_bss, please add code here */
- pmlmepriv->num_FortyMHzIntolerant = 0;
-
- pmlmepriv->num_sta_no_ht = 0;
-
- phtpriv->ampdu_enable = false;/* reset to disabled */
-
- rtw_set_threshold(padapter);
-}
-
-/* the function is >= passive_level */
-unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_ie, uint in_len, uint *pout_len)
-{
- u32 ielen, out_len;
- unsigned char *p;
- struct ieee80211_ht_cap ht_capie;
- unsigned char WMM_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01, 0x00};
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct qos_priv *pqospriv = &pmlmepriv->qospriv;
- struct ht_priv *phtpriv = &pmlmepriv->htpriv;
-
- phtpriv->ht_option = false;
-
- p = rtw_get_ie(in_ie + 12, _HT_CAPABILITY_IE_, &ielen, in_len - 12);
-
- if (p && ielen > 0) {
- if (pqospriv->qos_option == 0) {
- out_len = *pout_len;
- rtw_set_ie(out_ie + out_len, _VENDOR_SPECIFIC_IE_,
- _WMM_IE_Length_, WMM_IE, pout_len);
-
- pqospriv->qos_option = 1;
- }
-
- out_len = *pout_len;
-
- memset(&ht_capie, 0, sizeof(struct ieee80211_ht_cap));
-
- ht_capie.cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
- IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_TX_STBC |
- IEEE80211_HT_CAP_DSSSCCK40);
-
- /*
- AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
- AMPDU_para [4:2]:Min MPDU Start Spacing
- */
-
- ht_capie.ampdu_params_info = (MAX_AMPDU_FACTOR_64K & 0x03);
-
- if (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)
- ht_capie.ampdu_params_info |= (IEEE80211_HT_AMPDU_PARM_DENSITY & (0x07 << 2));
- else
- ht_capie.ampdu_params_info |= (IEEE80211_HT_AMPDU_PARM_DENSITY & 0x00);
-
- rtw_set_ie(out_ie + out_len, _HT_CAPABILITY_IE_,
- sizeof(struct ieee80211_ht_cap), (unsigned char *)&ht_capie, pout_len);
-
- phtpriv->ht_option = true;
-
- p = rtw_get_ie(in_ie + 12, _HT_ADD_INFO_IE_, &ielen, in_len - 12);
- if (p && (ielen == sizeof(struct ieee80211_ht_addt_info))) {
- out_len = *pout_len;
- rtw_set_ie(out_ie + out_len, _HT_ADD_INFO_IE_, ielen, p + 2, pout_len);
- }
- }
- return phtpriv->ht_option;
-}
-
-/* the function is > passive_level (in critical_section) */
-void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len)
-{
- u8 *p, max_ampdu_sz;
- int len;
- struct ieee80211_ht_cap *pht_capie;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct ht_priv *phtpriv = &pmlmepriv->htpriv;
- struct registry_priv *pregistrypriv = &padapter->registrypriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (!phtpriv->ht_option)
- return;
-
- if ((!pmlmeinfo->HT_info_enable) || (!pmlmeinfo->HT_caps_enable))
- return;
-
- /* maybe needs check if ap supports rx ampdu. */
- if ((!phtpriv->ampdu_enable) && (pregistrypriv->ampdu_enable == 1)) {
- if (pregistrypriv->wifi_spec == 1)
- phtpriv->ampdu_enable = false;
- else
- phtpriv->ampdu_enable = true;
- } else if (pregistrypriv->ampdu_enable == 2) {
- phtpriv->ampdu_enable = true;
- }
-
- /* check Max Rx A-MPDU Size */
- len = 0;
- p = rtw_get_ie(pie + sizeof(struct ndis_802_11_fixed_ie), _HT_CAPABILITY_IE_, &len, ie_len - sizeof(struct ndis_802_11_fixed_ie));
- if (p && len > 0) {
- pht_capie = (struct ieee80211_ht_cap *)(p + 2);
- max_ampdu_sz = (pht_capie->ampdu_params_info & IEEE80211_HT_AMPDU_PARM_FACTOR);
- max_ampdu_sz = 1 << (max_ampdu_sz + 3); /* max_ampdu_sz (kbytes); */
- phtpriv->rx_ampdu_maxlen = max_ampdu_sz;
- }
- len = 0;
- p = rtw_get_ie(pie + sizeof(struct ndis_802_11_fixed_ie), _HT_ADD_INFO_IE_, &len, ie_len - sizeof(struct ndis_802_11_fixed_ie));
-
- /* update cur_bwmode & cur_ch_offset */
- if ((pregistrypriv->cbw40_enable) &&
- (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & BIT(1)) &&
- (pmlmeinfo->HT_info.infos[0] & BIT(2))) {
- int i;
-
- /* update the MCS rates */
- for (i = 0; i < 16; i++)
- pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_1R[i];
-
- /* switch to the 40M Hz mode according to the AP */
- pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_40;
- switch ((pmlmeinfo->HT_info.infos[0] & 0x3)) {
- case HT_EXTCHNL_OFFSET_UPPER:
- pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
- break;
- case HT_EXTCHNL_OFFSET_LOWER:
- pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
- break;
- default:
- pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- break;
- }
- }
-
- /* Config SM Power Save setting */
- pmlmeinfo->SM_PS = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & 0x0C) >> 2;
-
- /* Config current HT Protection mode. */
- pmlmeinfo->HT_protection = pmlmeinfo->HT_info.infos[1] & 0x3;
-}
-
-void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitframe)
-{
- u8 issued;
- int priority;
- struct sta_info *psta = NULL;
- struct ht_priv *phtpriv;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
-
- if (is_multicast_ether_addr(pattrib->ra) ||
- padapter->mlmepriv.LinkDetectInfo.NumTxOkInPeriod < 100)
- return;
-
- priority = pattrib->priority;
-
- if (pattrib->psta)
- psta = pattrib->psta;
- else
- psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
-
- if (!psta)
- return;
-
- phtpriv = &psta->htpriv;
-
- if ((phtpriv->ht_option) && (phtpriv->ampdu_enable)) {
- issued = (phtpriv->agg_enable_bitmap >> priority) & 0x1;
- issued |= (phtpriv->candidate_tid_bitmap >> priority) & 0x1;
-
- if (issued == 0) {
- psta->htpriv.candidate_tid_bitmap |= BIT((u8)priority);
- rtw_addbareq_cmd(padapter, (u8)priority, pattrib->ra);
- }
- }
-}
-
-void rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- spin_lock_bh(&pmlmepriv->lock);
- _rtw_roaming(padapter, tgt_network);
- spin_unlock_bh(&pmlmepriv->lock);
-}
-void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- int do_join_r;
-
- struct wlan_network *pnetwork;
-
- if (tgt_network)
- pnetwork = tgt_network;
- else
- pnetwork = &pmlmepriv->cur_network;
-
- if (rtw_to_roaming(padapter) > 0) {
- memcpy(&pmlmepriv->assoc_ssid, &pnetwork->network.Ssid, sizeof(struct ndis_802_11_ssid));
-
- pmlmepriv->assoc_by_bssid = false;
-
- while (1) {
- do_join_r = rtw_do_join(padapter);
- if (do_join_r == _SUCCESS) {
- break;
- } else {
- pmlmepriv->to_roaming--;
-
- if (pmlmepriv->to_roaming > 0) {
- continue;
- } else {
- rtw_indicate_disconnect(padapter);
- break;
- }
- }
- }
- }
-}
diff --git a/drivers/staging/r8188eu/core/rtw_mlme_ext.c b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
deleted file mode 100644
index dc181e491b34..000000000000
--- a/drivers/staging/r8188eu/core/rtw_mlme_ext.c
+++ /dev/null
@@ -1,7817 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#define _RTW_MLME_EXT_C_
-
-#include <linux/ieee80211.h>
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/wifi.h"
-#include "../include/rtw_mlme_ext.h"
-#include "../include/wlan_bssdef.h"
-#include "../include/rtl8188e_xmit.h"
-#include "../include/rtl8188e_dm.h"
-
-static u8 null_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
-
-/**************************************************
-OUI definitions for the vendor specific IE
-***************************************************/
-unsigned char RTW_WPA_OUI[] = {0x00, 0x50, 0xf2, 0x01};
-unsigned char WMM_OUI[] = {0x00, 0x50, 0xf2, 0x02};
-unsigned char WPS_OUI[] = {0x00, 0x50, 0xf2, 0x04};
-unsigned char P2P_OUI[] = {0x50, 0x6F, 0x9A, 0x09};
-unsigned char WFD_OUI[] = {0x50, 0x6F, 0x9A, 0x0A};
-
-unsigned char WMM_INFO_OUI[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01};
-unsigned char WMM_PARA_OUI[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
-
-unsigned char WPA_TKIP_CIPHER[4] = {0x00, 0x50, 0xf2, 0x02};
-unsigned char RSN_TKIP_CIPHER[4] = {0x00, 0x0f, 0xac, 0x02};
-
-extern unsigned char REALTEK_96B_IE[];
-
-/********************************************************
-MCS rate definitions
-*********************************************************/
-unsigned char MCS_rate_1R[16] = {0xff, 0x00, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
-
-/********************************************************
-ChannelPlan definitions
-*********************************************************/
-static struct rt_channel_plan RTW_ChannelPlan2G[RT_CHANNEL_DOMAIN_2G_MAX] = {
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13}, /* 0x00, RT_CHANNEL_DOMAIN_2G_WORLD , Passive scan CH 12, 13 */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13}, /* 0x01, RT_CHANNEL_DOMAIN_2G_ETSI1 */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11}, /* 0x02, RT_CHANNEL_DOMAIN_2G_FCC1 */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14}, /* 0x03, RT_CHANNEL_DOMAIN_2G_MIKK1 */
- {{10, 11, 12, 13}, 4}, /* 0x04, RT_CHANNEL_DOMAIN_2G_ETSI2 */
- {{}, 0}, /* 0x05, RT_CHANNEL_DOMAIN_2G_NULL */
-};
-
-static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
- /* 0x00 ~ 0x1F , Old Define ===== */
- {0x02}, /* 0x00, RT_CHANNEL_DOMAIN_FCC */
- {0x02}, /* 0x01, RT_CHANNEL_DOMAIN_IC */
- {0x01}, /* 0x02, RT_CHANNEL_DOMAIN_ETSI */
- {0x01}, /* 0x03, RT_CHANNEL_DOMAIN_SPAIN */
- {0x01}, /* 0x04, RT_CHANNEL_DOMAIN_FRANCE */
- {0x03}, /* 0x05, RT_CHANNEL_DOMAIN_MKK */
- {0x03}, /* 0x06, RT_CHANNEL_DOMAIN_MKK1 */
- {0x01}, /* 0x07, RT_CHANNEL_DOMAIN_ISRAEL */
- {0x03}, /* 0x08, RT_CHANNEL_DOMAIN_TELEC */
- {0x03}, /* 0x09, RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN */
- {0x00}, /* 0x0A, RT_CHANNEL_DOMAIN_WORLD_WIDE_13 */
- {0x02}, /* 0x0B, RT_CHANNEL_DOMAIN_TAIWAN */
- {0x01}, /* 0x0C, RT_CHANNEL_DOMAIN_CHINA */
- {0x02}, /* 0x0D, RT_CHANNEL_DOMAIN_SINGAPORE_INDIA_MEXICO */
- {0x02}, /* 0x0E, RT_CHANNEL_DOMAIN_KOREA */
- {0x02}, /* 0x0F, RT_CHANNEL_DOMAIN_TURKEY */
- {0x01}, /* 0x10, RT_CHANNEL_DOMAIN_JAPAN */
- {0x02}, /* 0x11, RT_CHANNEL_DOMAIN_FCC_NO_DFS */
- {0x01}, /* 0x12, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */
- {0x00}, /* 0x13 */
- {0x02}, /* 0x14, RT_CHANNEL_DOMAIN_TAIWAN_NO_DFS */
- {0x00}, /* 0x15, RT_CHANNEL_DOMAIN_ETSI_NO_DFS */
- {0x00}, /* 0x16, RT_CHANNEL_DOMAIN_KOREA_NO_DFS */
- {0x03}, /* 0x17, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */
- {0x05}, /* 0x18, RT_CHANNEL_DOMAIN_PAKISTAN_NO_DFS */
- {0x02}, /* 0x19, RT_CHANNEL_DOMAIN_TAIWAN2_NO_DFS */
- {0x00}, /* 0x1A, */
- {0x00}, /* 0x1B, */
- {0x00}, /* 0x1C, */
- {0x00}, /* 0x1D, */
- {0x00}, /* 0x1E, */
- {0x00}, /* 0x1F, */
- /* 0x20 ~ 0x7F , New Define ===== */
- {0x00}, /* 0x20, RT_CHANNEL_DOMAIN_WORLD_NULL */
- {0x01}, /* 0x21, RT_CHANNEL_DOMAIN_ETSI1_NULL */
- {0x02}, /* 0x22, RT_CHANNEL_DOMAIN_FCC1_NULL */
- {0x03}, /* 0x23, RT_CHANNEL_DOMAIN_MKK1_NULL */
- {0x04}, /* 0x24, RT_CHANNEL_DOMAIN_ETSI2_NULL */
- {0x02}, /* 0x25, RT_CHANNEL_DOMAIN_FCC1_FCC1 */
- {0x00}, /* 0x26, RT_CHANNEL_DOMAIN_WORLD_ETSI1 */
- {0x03}, /* 0x27, RT_CHANNEL_DOMAIN_MKK1_MKK1 */
- {0x00}, /* 0x28, RT_CHANNEL_DOMAIN_WORLD_KCC1 */
- {0x00}, /* 0x29, RT_CHANNEL_DOMAIN_WORLD_FCC2 */
- {0x00}, /* 0x2A, */
- {0x00}, /* 0x2B, */
- {0x00}, /* 0x2C, */
- {0x00}, /* 0x2D, */
- {0x00}, /* 0x2E, */
- {0x00}, /* 0x2F, */
- {0x00}, /* 0x30, RT_CHANNEL_DOMAIN_WORLD_FCC3 */
- {0x00}, /* 0x31, RT_CHANNEL_DOMAIN_WORLD_FCC4 */
- {0x00}, /* 0x32, RT_CHANNEL_DOMAIN_WORLD_FCC5 */
- {0x00}, /* 0x33, RT_CHANNEL_DOMAIN_WORLD_FCC6 */
- {0x02}, /* 0x34, RT_CHANNEL_DOMAIN_FCC1_FCC7 */
- {0x00}, /* 0x35, RT_CHANNEL_DOMAIN_WORLD_ETSI2 */
- {0x00}, /* 0x36, RT_CHANNEL_DOMAIN_WORLD_ETSI3 */
- {0x03}, /* 0x37, RT_CHANNEL_DOMAIN_MKK1_MKK2 */
- {0x03}, /* 0x38, RT_CHANNEL_DOMAIN_MKK1_MKK3 */
- {0x02}, /* 0x39, RT_CHANNEL_DOMAIN_FCC1_NCC1 */
- {0x00}, /* 0x3A, */
- {0x00}, /* 0x3B, */
- {0x00}, /* 0x3C, */
- {0x00}, /* 0x3D, */
- {0x00}, /* 0x3E, */
- {0x00}, /* 0x3F, */
- {0x02}, /* 0x40, RT_CHANNEL_DOMAIN_FCC1_NCC2 */
- {0x03}, /* 0x41, RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G */
-};
-
-static struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03}; /* use the combination for max channel numbers */
-
-/*
- * Search for the given channel @ch in the given channel set @ch_set
- * @ch_set: the given channel set
- * @ch: the given channel number
- *
- * return the index of @ch in @ch_set, -1 if not found
- */
-int rtw_ch_set_search_ch(struct rt_channel_info *ch_set, const u32 ch)
-{
- int i;
- for (i = 0; ch_set[i].ChannelNum != 0; i++) {
- if (ch == ch_set[i].ChannelNum)
- break;
- }
-
- if (i >= ch_set[i].ChannelNum)
- return -1;
- return i;
-}
-
-/****************************************************************************
-
-Following are the initialization functions for WiFi MLME
-
-*****************************************************************************/
-
-int init_hw_mlme_ext(struct adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
- return _SUCCESS;
-}
-
-static void init_mlme_ext_priv_value(struct adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- unsigned char mixed_datarate[NumRates] = {
- _1M_RATE_, _2M_RATE_, _5M_RATE_, _11M_RATE_, _6M_RATE_,
- _9M_RATE_, _12M_RATE_, _18M_RATE_, _24M_RATE_, _36M_RATE_,
- _48M_RATE_, _54M_RATE_, 0xff
- };
- unsigned char mixed_basicrate[NumRates] = {
- _1M_RATE_, _2M_RATE_, _5M_RATE_, _11M_RATE_, _6M_RATE_,
- _12M_RATE_, _24M_RATE_, 0xff,
- };
-
- atomic_set(&pmlmeext->event_seq, 0);
- pmlmeext->mgnt_seq = 0;/* reset to zero when disconnecting in client mode */
-
- pmlmeext->cur_channel = padapter->registrypriv.channel;
- pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
- pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- pmlmeext->retry = 0;
-
- pmlmeext->cur_wireless_mode = padapter->registrypriv.wireless_mode;
-
- memcpy(pmlmeext->datarate, mixed_datarate, NumRates);
- memcpy(pmlmeext->basicrate, mixed_basicrate, NumRates);
-
- pmlmeext->tx_rate = IEEE80211_CCK_RATE_1MB;
-
- pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
- pmlmeext->sitesurvey_res.channel_idx = 0;
- pmlmeext->sitesurvey_res.bss_cnt = 0;
- pmlmeext->scan_abort = false;
-
- pmlmeinfo->state = WIFI_FW_NULL_STATE;
- pmlmeinfo->reauth_count = 0;
- pmlmeinfo->reassoc_count = 0;
- pmlmeinfo->link_count = 0;
- pmlmeinfo->auth_seq = 0;
- pmlmeinfo->auth_algo = dot11AuthAlgrthm_Open;
- pmlmeinfo->key_index = 0;
- pmlmeinfo->iv = 0;
-
- pmlmeinfo->enc_algo = _NO_PRIVACY_;
- pmlmeinfo->authModeToggle = 0;
-
- memset(pmlmeinfo->chg_txt, 0, 128);
-
- pmlmeinfo->slotTime = SHORT_SLOT_TIME;
- pmlmeinfo->preamble_mode = PREAMBLE_AUTO;
-
- pmlmeinfo->dialogToken = 0;
-
- pmlmeext->action_public_rxseq = 0xffff;
- pmlmeext->action_public_dialog_token = 0xff;
-}
-
-static int has_channel(struct rt_channel_info *channel_set,
- u8 chanset_size,
- u8 chan)
-{
- int i;
-
- for (i = 0; i < chanset_size; i++) {
- if (channel_set[i].ChannelNum == chan)
- return 1;
- }
- return 0;
-}
-
-static void init_channel_list(struct adapter *padapter, struct rt_channel_info *channel_set,
- u8 chanset_size,
- struct p2p_channels *channel_list)
-{
- struct p2p_oper_class_map op_class[] = {
- { IEEE80211G, 81, 1, 13, 1, BW20 },
- { IEEE80211G, 82, 14, 14, 1, BW20 },
- { -1, 0, 0, 0, 0, BW20 }
- };
-
- int cla, op;
-
- cla = 0;
-
- for (op = 0; op_class[op].op_class; op++) {
- u8 ch;
- struct p2p_oper_class_map *o = &op_class[op];
- struct p2p_reg_class *reg = NULL;
-
- for (ch = o->min_chan; ch <= o->max_chan; ch += o->inc) {
- if (!has_channel(channel_set, chanset_size, ch)) {
- continue;
- }
-
- if ((padapter->registrypriv.ht_enable == 0) && (o->inc == 8))
- continue;
-
- if (((padapter->registrypriv.cbw40_enable & BIT(1)) == 0) &&
- ((o->bw == BW40MINUS) || (o->bw == BW40PLUS)))
- continue;
-
- if (!reg) {
- reg = &channel_list->reg_class[cla];
- cla++;
- reg->reg_class = o->op_class;
- reg->channels = 0;
- }
- reg->channel[reg->channels] = ch;
- reg->channels++;
- }
- }
- channel_list->reg_classes = cla;
-}
-
-static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_channel_info *channel_set)
-{
- u8 index, chanset_size = 0;
- u8 b2_4GBand = false;
- u8 Index2G = 0;
-
- memset(channel_set, 0, sizeof(struct rt_channel_info) * MAX_CHANNEL_NUM);
-
- if (ChannelPlan >= RT_CHANNEL_DOMAIN_MAX && ChannelPlan != RT_CHANNEL_DOMAIN_REALTEK_DEFINE)
- return chanset_size;
-
- if (padapter->registrypriv.wireless_mode & WIRELESS_11G) {
- b2_4GBand = true;
- if (ChannelPlan == RT_CHANNEL_DOMAIN_REALTEK_DEFINE)
- Index2G = RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE.Index2G;
- else
- Index2G = RTW_ChannelPlanMap[ChannelPlan].Index2G;
- }
-
- if (b2_4GBand) {
- for (index = 0; index < RTW_ChannelPlan2G[Index2G].Len; index++) {
- channel_set[chanset_size].ChannelNum = RTW_ChannelPlan2G[Index2G].Channel[index];
-
- if ((ChannelPlan == RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN) || /* Channels 1~11 are active, and 12~14 are passive */
- (ChannelPlan == RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G)) {
- if (channel_set[chanset_size].ChannelNum >= 1 && channel_set[chanset_size].ChannelNum <= 11)
- channel_set[chanset_size].ScanType = SCAN_ACTIVE;
- else if ((channel_set[chanset_size].ChannelNum >= 12 && channel_set[chanset_size].ChannelNum <= 14))
- channel_set[chanset_size].ScanType = SCAN_PASSIVE;
- } else if (ChannelPlan == RT_CHANNEL_DOMAIN_WORLD_WIDE_13 ||
- Index2G == RT_CHANNEL_DOMAIN_2G_WORLD) {/* channel 12~13, passive scan */
- if (channel_set[chanset_size].ChannelNum <= 11)
- channel_set[chanset_size].ScanType = SCAN_ACTIVE;
- else
- channel_set[chanset_size].ScanType = SCAN_PASSIVE;
- } else {
- channel_set[chanset_size].ScanType = SCAN_ACTIVE;
- }
-
- chanset_size++;
- }
- }
- return chanset_size;
-}
-
-static void _survey_timer_hdl(struct timer_list *t)
-{
- struct adapter *padapter = from_timer(padapter, t, mlmeextpriv.survey_timer);
-
- survey_timer_hdl(padapter);
-}
-
-static void _link_timer_hdl(struct timer_list *t)
-{
- struct adapter *padapter = from_timer(padapter, t, mlmeextpriv.link_timer);
-
- link_timer_hdl(padapter);
-}
-
-static void init_mlme_ext_timer(struct adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- timer_setup(&pmlmeext->survey_timer, _survey_timer_hdl, 0);
- timer_setup(&pmlmeext->link_timer, _link_timer_hdl, 0);
-}
-
-void init_mlme_ext_priv(struct adapter *padapter)
-{
- struct registry_priv *pregistrypriv = &padapter->registrypriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- pmlmeext->padapter = padapter;
-
- init_mlme_ext_priv_value(padapter);
- pmlmeinfo->bAcceptAddbaReq = pregistrypriv->bAcceptAddbaReq;
-
- init_mlme_ext_timer(padapter);
-
- init_mlme_ap_info(padapter);
-
- pmlmeext->max_chan_nums = init_channel_set(padapter, pmlmepriv->ChannelPlan, pmlmeext->channel_set);
- init_channel_list(padapter, pmlmeext->channel_set, pmlmeext->max_chan_nums, &pmlmeext->channel_list);
-
- pmlmeext->chan_scan_time = SURVEY_TO;
- pmlmeext->mlmeext_init = true;
-
- pmlmeext->active_keep_alive_check = true;
-}
-
-void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
-{
- struct adapter *padapter = pmlmeext->padapter;
-
- if (!padapter)
- return;
-
- if (padapter->bDriverStopped) {
- _cancel_timer_ex(&pmlmeext->survey_timer);
- _cancel_timer_ex(&pmlmeext->link_timer);
- /* _cancel_timer_ex(&pmlmeext->ADDBA_timer); */
- }
-}
-
-static u32 p2p_listen_state_process(struct adapter *padapter, unsigned char *da)
-{
- bool response = true;
-
- /* do nothing if the device name is empty */
- if (!padapter->wdinfo.device_name_len)
- response = false;
-
- if (response)
- issue_probersp_p2p(padapter, da);
-
- return _SUCCESS;
-}
-
-static void correct_TSF(struct adapter *padapter)
-{
- u8 reg;
- int res;
- u64 tsf;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- tsf = pmlmeext->TSFValue - do_div(pmlmeext->TSFValue,
- pmlmeinfo->bcn_interval * 1024) - 1024; /* us */
-
- if (((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) ||
- ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE))
- rtw_stop_tx_beacon(padapter);
-
- /* disable related TSF function */
- res = rtw_read8(padapter, REG_BCN_CTRL, &reg);
- if (res)
- return;
-
- rtw_write8(padapter, REG_BCN_CTRL, reg & (~BIT(3)));
-
- rtw_write32(padapter, REG_TSFTR, tsf);
- rtw_write32(padapter, REG_TSFTR + 4, tsf >> 32);
-
- /* enable related TSF function */
- res = rtw_read8(padapter, REG_BCN_CTRL, &reg);
- if (res)
- return;
-
- rtw_write8(padapter, REG_BCN_CTRL, reg | BIT(3));
-
- if (((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) ||
- ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE))
- rtw_resume_tx_beacon(padapter);
-}
-
-/****************************************************************************
-
-Following are the callback functions for each subtype of the management frames
-
-*****************************************************************************/
-
-static void OnProbeReq(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- unsigned int ielen;
- unsigned char *p;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *cur = &pmlmeinfo->network;
- u8 *pframe = precv_frame->rx_data;
- uint len = precv_frame->len;
- u8 is_valid_p2p_probereq = false;
-
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) &&
- !rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE) &&
- !rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT) &&
- !rtw_p2p_chk_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH) &&
- !rtw_p2p_chk_state(pwdinfo, P2P_STATE_SCAN)) {
- /* mcs_rate = 0 -> CCK 1M rate */
- /* mcs_rate = 1 -> CCK 2M rate */
- /* mcs_rate = 2 -> CCK 5.5M rate */
- /* mcs_rate = 3 -> CCK 11M rate */
- /* In the P2P mode, the driver should not support the CCK rate */
-
- /* Commented by Kurt 2012/10/16 */
- /* IOT issue: Google Nexus7 uses the 1M rate to send p2p_probe_req after GO nego is completed and Nexus7 is the client */
- is_valid_p2p_probereq = process_probe_req_p2p_ie(pwdinfo, pframe, len);
- if (is_valid_p2p_probereq) {
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE)) {
- /* FIXME */
- report_survey_event(padapter, precv_frame);
- p2p_listen_state_process(padapter, get_sa(pframe));
-
- return;
- }
- }
- }
-
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
- return;
-
- if (!check_fwstate(pmlmepriv, _FW_LINKED) &&
- !check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE | WIFI_AP_STATE))
- return;
-
- p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, _SSID_IE_, (int *)&ielen,
- len - WLAN_HDR_A3_LEN - _PROBEREQ_IE_OFFSET_);
-
- /* check (wildcard) SSID */
- if (p) {
- if (is_valid_p2p_probereq)
- goto _issue_probersp;
-
- if ((ielen != 0 && memcmp((void *)(p + 2), (void *)cur->Ssid.Ssid, cur->Ssid.SsidLength)) ||
- (ielen == 0 && pmlmeinfo->hidden_ssid_mode))
- return;
-
-_issue_probersp:
-
- if (check_fwstate(pmlmepriv, _FW_LINKED) &&
- (pmlmepriv->cur_network.join_res ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)))
- issue_probersp(padapter, get_sa(pframe), is_valid_p2p_probereq);
- }
-}
-
-static void OnProbeRsp(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- u8 *pframe = precv_frame->rx_data;
-
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ)) {
- if (pwdinfo->tx_prov_disc_info.benable) {
- if (!memcmp(pwdinfo->tx_prov_disc_info.peerIFAddr, GetAddr2Ptr(pframe), ETH_ALEN)) {
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT)) {
- pwdinfo->tx_prov_disc_info.benable = false;
- issue_p2p_provision_request(padapter,
- pwdinfo->tx_prov_disc_info.ssid.Ssid,
- pwdinfo->tx_prov_disc_info.ssid.SsidLength,
- pwdinfo->tx_prov_disc_info.peerDevAddr);
- } else if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE) || rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
- pwdinfo->tx_prov_disc_info.benable = false;
- issue_p2p_provision_request(padapter, NULL, 0,
- pwdinfo->tx_prov_disc_info.peerDevAddr);
- }
- }
- }
- return;
- } else if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING)) {
- if (pwdinfo->nego_req_info.benable) {
- if (!memcmp(pwdinfo->nego_req_info.peerDevAddr, GetAddr2Ptr(pframe), ETH_ALEN)) {
- pwdinfo->nego_req_info.benable = false;
- issue_p2p_GO_request(padapter, pwdinfo->nego_req_info.peerDevAddr);
- }
- }
- } else if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_INVITE_REQ)) {
- if (pwdinfo->invitereq_info.benable) {
- if (!memcmp(pwdinfo->invitereq_info.peer_macaddr, GetAddr2Ptr(pframe), ETH_ALEN)) {
- pwdinfo->invitereq_info.benable = false;
- issue_p2p_invitation_request(padapter, pwdinfo->invitereq_info.peer_macaddr);
- }
- }
- }
- if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
- report_survey_event(padapter, precv_frame);
- return;
- }
-}
-
-static void OnBeacon(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data;
- int cam_idx;
- struct sta_info *psta;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
- u8 *pframe = precv_frame->rx_data;
- uint len = precv_frame->len;
- struct wlan_bssid_ex *pbss;
- u8 *ie_ptr;
- u32 ie_len;
-
- ie_ptr = (u8 *)&mgmt->u.beacon.variable;
- if (precv_frame->len < offsetof(struct ieee80211_mgmt, u.beacon.variable))
- return;
- ie_len = precv_frame->len - offsetof(struct ieee80211_mgmt, u.beacon.variable);
-
- if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
- report_survey_event(padapter, precv_frame);
- return;
- }
-
- if (memcmp(mgmt->bssid, get_my_bssid(&pmlmeinfo->network), ETH_ALEN))
- return;
-
- if (pmlmeinfo->state & WIFI_FW_AUTH_NULL) {
- /* we should update the current network before auth, or some IEs will be wrong */
- pbss = kmalloc(sizeof(struct wlan_bssid_ex), GFP_ATOMIC);
- if (!pbss)
- return;
-
- if (collect_bss_info(padapter, precv_frame, pbss) == _SUCCESS) {
- update_network(&pmlmepriv->cur_network.network, pbss, padapter, true);
- rtw_get_bcn_info(&pmlmepriv->cur_network);
- }
- kfree(pbss);
-
- /* check the vendor of the assoc AP */
- pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe + sizeof(struct ieee80211_hdr_3addr), len - sizeof(struct ieee80211_hdr_3addr));
-
- pmlmeext->TSFValue = le64_to_cpu(mgmt->u.beacon.timestamp);
-
- /* start auth */
- start_clnt_auth(padapter);
-
- return;
- }
-
- if (((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE) && (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)) {
- psta = rtw_get_stainfo(pstapriv, mgmt->sa);
- if (!psta)
- return;
-
- if (rtw_check_bcn_info(padapter, pframe, len) != _SUCCESS) {
- receive_disconnect(padapter, pmlmeinfo->network.MacAddress, 0);
- return;
- }
- /* update WMM, ERP in the beacon */
- /* todo: use a timer instead of the number of beacons received */
- if ((sta_rx_pkts(psta) & 0xf) == 0)
- update_beacon_info(padapter, ie_ptr, ie_len, psta);
- process_p2p_ps_ie(padapter, ie_ptr, ie_len);
- } else if ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) {
- psta = rtw_get_stainfo(pstapriv, mgmt->sa);
- if (psta) {
- /* update WMM, ERP in the beacon */
- /* todo: use a timer instead of the number of beacons received */
- if ((sta_rx_pkts(psta) & 0xf) == 0)
- update_beacon_info(padapter, ie_ptr, ie_len, psta);
- } else {
- /* allocate a new CAM entry for IBSS station */
- cam_idx = allocate_fw_sta_entry(padapter);
- if (cam_idx == NUM_STA)
- return;
-
- /* get supported rate */
- if (update_sta_support_rate(padapter, ie_ptr, ie_len, cam_idx) == _FAIL) {
- pmlmeinfo->FW_sta_info[cam_idx].status = 0;
- return;
- }
-
- pmlmeext->TSFValue = le64_to_cpu(mgmt->u.beacon.timestamp);
-
- report_add_sta_event(padapter, mgmt->sa, cam_idx);
- }
- }
-}
-
-static void OnAuth(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- unsigned int auth_mode, ie_len;
- u16 seq;
- unsigned char *sa, *p;
- u16 algorithm;
- int status;
- static struct sta_info stat;
- struct sta_info *pstat = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- u8 *pframe = precv_frame->rx_data;
- uint len = precv_frame->len;
-
- if ((pmlmeinfo->state & 0x03) != WIFI_FW_AP_STATE)
- return;
-
- sa = GetAddr2Ptr(pframe);
-
- auth_mode = psecuritypriv->dot11AuthAlgrthm;
- seq = le16_to_cpu(*(__le16 *)((size_t)pframe + WLAN_HDR_A3_LEN + 2));
- algorithm = le16_to_cpu(*(__le16 *)((size_t)pframe + WLAN_HDR_A3_LEN));
-
- if (auth_mode == 2 && psecuritypriv->dot11PrivacyAlgrthm != _WEP40_ &&
- psecuritypriv->dot11PrivacyAlgrthm != _WEP104_)
- auth_mode = 0;
-
- if ((algorithm > 0 && auth_mode == 0) || /* rx a shared-key auth but shared-key is not enabled */
- (algorithm == 0 && auth_mode == 1)) { /* rx an open-system auth but shared-key is enabled */
-
- status = _STATS_NO_SUPP_ALG_;
-
- goto auth_fail;
- }
-
- if (!rtw_access_ctrl(padapter, sa)) {
- status = _STATS_UNABLE_HANDLE_STA_;
- goto auth_fail;
- }
-
- pstat = rtw_get_stainfo(pstapriv, sa);
- if (!pstat) {
- /* allocate a new one */
- pstat = rtw_alloc_stainfo(pstapriv, sa);
- if (!pstat) {
- status = _STATS_UNABLE_HANDLE_STA_;
- goto auth_fail;
- }
-
- pstat->state = WIFI_FW_AUTH_NULL;
- pstat->auth_seq = 0;
- } else {
- spin_lock_bh(&pstapriv->asoc_list_lock);
- if (!list_empty(&pstat->asoc_list)) {
- list_del_init(&pstat->asoc_list);
- pstapriv->asoc_list_cnt--;
- }
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-
- if (seq == 1) {
- /* TODO: STA re_auth and auth timeout */
- }
- }
-
- spin_lock_bh(&pstapriv->auth_list_lock);
- if (list_empty(&pstat->auth_list)) {
- list_add_tail(&pstat->auth_list, &pstapriv->auth_list);
- pstapriv->auth_list_cnt++;
- }
- spin_unlock_bh(&pstapriv->auth_list_lock);
-
- if (pstat->auth_seq == 0)
- pstat->expire_to = pstapriv->auth_to;
-
- if ((pstat->auth_seq + 1) != seq) {
- status = _STATS_OUT_OF_AUTH_SEQ_;
- goto auth_fail;
- }
-
- if (algorithm == 0 && (auth_mode == 0 || auth_mode == 2)) {
- if (seq == 1) {
- pstat->state &= ~WIFI_FW_AUTH_NULL;
- pstat->state |= WIFI_FW_AUTH_SUCCESS;
- pstat->expire_to = pstapriv->assoc_to;
- pstat->authalg = algorithm;
- } else {
- status = _STATS_OUT_OF_AUTH_SEQ_;
- goto auth_fail;
- }
- } else { /* shared system or auto authentication */
- if (seq == 1) {
- /* prepare the challenge text... */
-
- pstat->state &= ~WIFI_FW_AUTH_NULL;
- pstat->state |= WIFI_FW_AUTH_STATE;
- pstat->authalg = algorithm;
- pstat->auth_seq = 2;
- } else if (seq == 3) {
- /* checking the challenge text... */
-
- p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, (int *)&ie_len,
- len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_ - 4);
-
- if (!p || ie_len <= 0) {
- status = _STATS_CHALLENGE_FAIL_;
- goto auth_fail;
- }
-
- if (!memcmp((void *)(p + 2), pstat->chg_txt, 128)) {
- pstat->state &= (~WIFI_FW_AUTH_STATE);
- pstat->state |= WIFI_FW_AUTH_SUCCESS;
- /* challenge text is correct... */
- pstat->expire_to = pstapriv->assoc_to;
- } else {
- status = _STATS_CHALLENGE_FAIL_;
- goto auth_fail;
- }
- } else {
- status = _STATS_OUT_OF_AUTH_SEQ_;
- goto auth_fail;
- }
- }
-
- /* Now, we are going to issue_auth... */
- pstat->auth_seq = seq + 1;
-
- issue_auth(padapter, pstat, (unsigned short)(_STATS_SUCCESSFUL_));
-
- if (pstat->state & WIFI_FW_AUTH_SUCCESS)
- pstat->auth_seq = 0;
-
- return;
-
-auth_fail:
-
- if (pstat)
- rtw_free_stainfo(padapter, pstat);
-
- pstat = &stat;
- memset((char *)pstat, '\0', sizeof(stat));
- pstat->auth_seq = 2;
- memcpy(pstat->hwaddr, sa, 6);
-
- issue_auth(padapter, pstat, (unsigned short)status);
-}
-
-static void OnAuthClient(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- unsigned int seq, len, status, offset;
- unsigned char *p;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
- u8 *pframe = precv_frame->rx_data;
- uint pkt_len = precv_frame->len;
-
- /* check A1 matches or not */
- if (memcmp(myid(&padapter->eeprompriv), ieee80211_get_DA(hdr), ETH_ALEN))
- return;
-
- if (!(pmlmeinfo->state & WIFI_FW_AUTH_STATE))
- return;
-
- offset = ieee80211_has_protected(hdr->frame_control) ? 4 : 0;
-
- seq = le16_to_cpu(*(__le16 *)((size_t)pframe + WLAN_HDR_A3_LEN + offset + 2));
- status = le16_to_cpu(*(__le16 *)((size_t)pframe + WLAN_HDR_A3_LEN + offset + 4));
-
- if (status != 0) {
- if (status == 13) { /* pmlmeinfo->auth_algo == dot11AuthAlgrthm_Auto */
- if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared)
- pmlmeinfo->auth_algo = dot11AuthAlgrthm_Open;
- else
- pmlmeinfo->auth_algo = dot11AuthAlgrthm_Shared;
- }
-
- set_link_timer(pmlmeext, 1);
- return;
- }
-
- if (seq == 2) {
- if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) {
- /* legacy shared key system */
- p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, (int *)&len,
- pkt_len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_);
-
- if (!p)
- return;
-
- memcpy((void *)(pmlmeinfo->chg_txt), (void *)(p + 2), len);
- pmlmeinfo->auth_seq = 3;
- issue_auth(padapter, NULL, 0);
- set_link_timer(pmlmeext, REAUTH_TO);
-
- return;
- } else {
- /* open system */
- start_clnt_assoc(padapter);
- }
- } else if (seq == 4) {
- if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared)
- start_clnt_assoc(padapter);
- }
-}
-
-static void UpdateBrateTbl(u8 *mbrate)
-{
- u8 i;
- u8 rate;
-
- /* 1M, 2M, 5.5M, 11M, 6M, 12M, 24M are mandatory. */
- for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
- rate = mbrate[i] & 0x7f;
- switch (rate) {
- case IEEE80211_CCK_RATE_1MB:
- case IEEE80211_CCK_RATE_2MB:
- case IEEE80211_CCK_RATE_5MB:
- case IEEE80211_CCK_RATE_11MB:
- case IEEE80211_OFDM_RATE_6MB:
- case IEEE80211_OFDM_RATE_12MB:
- case IEEE80211_OFDM_RATE_24MB:
- mbrate[i] |= IEEE80211_BASIC_RATE_MASK;
- break;
- }
- }
-}
-
-static void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen)
-{
- u8 i;
- u8 rate;
-
- for (i = 0; i < bssratelen; i++) {
- rate = bssrateset[i] & 0x7f;
- switch (rate) {
- case IEEE80211_CCK_RATE_1MB:
- case IEEE80211_CCK_RATE_2MB:
- case IEEE80211_CCK_RATE_5MB:
- case IEEE80211_CCK_RATE_11MB:
- bssrateset[i] |= IEEE80211_BASIC_RATE_MASK;
- break;
- }
- }
-}
-
-static void OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- u16 capab_info;
- struct rtw_ieee802_11_elems elems;
- struct sta_info *pstat;
- unsigned char *p, *pos, *wpa_ie;
- unsigned char WMM_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01};
- int i, ie_len, wpa_ie_len, left;
- unsigned char supportRate[16];
- int supportRateNum;
- unsigned short status = _STATS_SUCCESSFUL_;
- unsigned short frame_type, ie_offset = 0;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *cur = &pmlmeinfo->network;
- struct sta_priv *pstapriv = &padapter->stapriv;
- u8 *pframe = precv_frame->rx_data;
- uint pkt_len = precv_frame->len;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- u8 p2p_status_code = P2P_STATUS_SUCCESS;
- u8 *p2pie;
- u32 p2pielen = 0;
-
- if ((pmlmeinfo->state & 0x03) != WIFI_FW_AP_STATE)
- return;
-
- frame_type = GetFrameSubType(pframe);
- if (frame_type == WIFI_ASSOCREQ)
- ie_offset = _ASOCREQ_IE_OFFSET_;
- else /* WIFI_REASSOCREQ */
- ie_offset = _REASOCREQ_IE_OFFSET_;
-
- if (pkt_len < IEEE80211_3ADDR_LEN + ie_offset)
- return;
-
- pstat = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
- if (pstat == (struct sta_info *)NULL) {
- status = _RSON_CLS2_;
- goto asoc_class2_error;
- }
-
- capab_info = get_unaligned_le16(pframe + WLAN_HDR_A3_LEN);
-
- left = pkt_len - (IEEE80211_3ADDR_LEN + ie_offset);
- pos = pframe + (IEEE80211_3ADDR_LEN + ie_offset);
-
- /* check if this station has been successfully authenticated/associated */
- if (!((pstat->state) & WIFI_FW_AUTH_SUCCESS)) {
- if (!((pstat->state) & WIFI_FW_ASSOC_SUCCESS)) {
- status = _RSON_CLS2_;
- goto asoc_class2_error;
- } else {
- pstat->state &= (~WIFI_FW_ASSOC_SUCCESS);
- pstat->state |= WIFI_FW_ASSOC_STATE;
- }
- } else {
- pstat->state &= (~WIFI_FW_AUTH_SUCCESS);
- pstat->state |= WIFI_FW_ASSOC_STATE;
- }
- pstat->capability = capab_info;
- /* now parse all ieee802_11 IEs into elems */
- if (rtw_ieee802_11_parse_elems(pos, left, &elems, 1) == ParseFailed ||
- !elems.ssid) {
- status = _STATS_FAILURE_;
- goto OnAssocReqFail;
- }
-
- /* now we should check all the fields... */
- /* checking SSID */
- p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _SSID_IE_, &ie_len,
- pkt_len - WLAN_HDR_A3_LEN - ie_offset);
- if (!p)
- status = _STATS_FAILURE_;
-
- if (ie_len == 0) { /* broadcast ssid, however it is not allowed in assocreq */
- status = _STATS_FAILURE_;
- } else {
- /* check if ssid match */
- if (memcmp((void *)(p + 2), cur->Ssid.Ssid, cur->Ssid.SsidLength))
- status = _STATS_FAILURE_;
-
- if (ie_len != cur->Ssid.SsidLength)
- status = _STATS_FAILURE_;
- }
-
- if (status != _STATS_SUCCESSFUL_)
- goto OnAssocReqFail;
-
- /* check if the supported rate is ok */
- p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _SUPPORTEDRATES_IE_, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
- if (!p) {
- /* use our own rate set as the station's rate set */
- /* memcpy(supportRate, AP_BSSRATE, AP_BSSRATE_LEN); */
- /* supportRateNum = AP_BSSRATE_LEN; */
-
- status = _STATS_FAILURE_;
- goto OnAssocReqFail;
- } else {
- memcpy(supportRate, p + 2, ie_len);
- supportRateNum = ie_len;
-
- p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _EXT_SUPPORTEDRATES_IE_, &ie_len,
- pkt_len - WLAN_HDR_A3_LEN - ie_offset);
- if (p) {
- if (supportRateNum <= sizeof(supportRate)) {
- memcpy(supportRate + supportRateNum, p + 2, ie_len);
- supportRateNum += ie_len;
- }
- }
- }
-
- /* todo: mask supportRate between AP & STA -> move to update raid */
- /* get_matched_rate(pmlmeext, supportRate, &supportRateNum, 0); */
-
- /* update station supportRate */
- pstat->bssratelen = supportRateNum;
- memcpy(pstat->bssrateset, supportRate, supportRateNum);
- UpdateBrateTblForSoftAP(pstat->bssrateset, pstat->bssratelen);
-
- /* check RSN/WPA/WPS */
- pstat->dot8021xalg = 0;
- pstat->wpa_psk = 0;
- pstat->wpa_group_cipher = 0;
- pstat->wpa2_group_cipher = 0;
- pstat->wpa_pairwise_cipher = 0;
- pstat->wpa2_pairwise_cipher = 0;
- memset(pstat->wpa_ie, 0, sizeof(pstat->wpa_ie));
- if ((psecuritypriv->wpa_psk & BIT(1)) && elems.rsn_ie) {
- int group_cipher = 0, pairwise_cipher = 0;
-
- wpa_ie = elems.rsn_ie;
- wpa_ie_len = elems.rsn_ie_len;
-
- if (rtw_parse_wpa2_ie(wpa_ie - 2, wpa_ie_len + 2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
- pstat->dot8021xalg = 1;/* psk, todo:802.1x */
- pstat->wpa_psk |= BIT(1);
-
- pstat->wpa2_group_cipher = group_cipher & psecuritypriv->wpa2_group_cipher;
- pstat->wpa2_pairwise_cipher = pairwise_cipher & psecuritypriv->wpa2_pairwise_cipher;
-
- if (!pstat->wpa2_group_cipher)
- status = WLAN_STATUS_GROUP_CIPHER_NOT_VALID;
-
- if (!pstat->wpa2_pairwise_cipher)
- status = WLAN_STATUS_PAIRWISE_CIPHER_NOT_VALID;
- } else {
- status = WLAN_STATUS_INVALID_IE;
- }
- } else if ((psecuritypriv->wpa_psk & BIT(0)) && elems.wpa_ie) {
- int group_cipher = 0, pairwise_cipher = 0;
-
- wpa_ie = elems.wpa_ie;
- wpa_ie_len = elems.wpa_ie_len;
-
- if (rtw_parse_wpa_ie(wpa_ie - 2, wpa_ie_len + 2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
- pstat->dot8021xalg = 1;/* psk, todo:802.1x */
- pstat->wpa_psk |= BIT(0);
-
- pstat->wpa_group_cipher = group_cipher & psecuritypriv->wpa_group_cipher;
- pstat->wpa_pairwise_cipher = pairwise_cipher & psecuritypriv->wpa_pairwise_cipher;
-
- if (!pstat->wpa_group_cipher)
- status = WLAN_STATUS_GROUP_CIPHER_NOT_VALID;
-
- if (!pstat->wpa_pairwise_cipher)
- status = WLAN_STATUS_PAIRWISE_CIPHER_NOT_VALID;
- } else {
- status = WLAN_STATUS_INVALID_IE;
- }
- } else {
- wpa_ie = NULL;
- wpa_ie_len = 0;
- }
-
- if (status != _STATS_SUCCESSFUL_)
- goto OnAssocReqFail;
-
- pstat->flags &= ~(WLAN_STA_WPS | WLAN_STA_MAYBE_WPS);
- if (!wpa_ie) {
- if (elems.wps_ie)
- pstat->flags |= WLAN_STA_WPS;
- /* wpabuf_free(sta->wps_ie); */
- /* sta->wps_ie = wpabuf_alloc_copy(elems.wps_ie + 4, */
- /* elems.wps_ie_len - 4); */
- else
- pstat->flags |= WLAN_STA_MAYBE_WPS;
-
- /* AP supports WPA/RSN, and the sta is going to do WPS, but the AP is not ready, */
- /* i.e. the selected registrar of the AP is false */
- if ((psecuritypriv->wpa_psk > 0) && (pstat->flags & (WLAN_STA_WPS | WLAN_STA_MAYBE_WPS))) {
- if (pmlmepriv->wps_beacon_ie) {
- u8 selected_registrar = 0;
-
- rtw_get_wps_attr_content(pmlmepriv->wps_beacon_ie, pmlmepriv->wps_beacon_ie_len, WPS_ATTR_SELECTED_REGISTRAR, &selected_registrar, NULL);
-
- if (!selected_registrar) {
-
- status = _STATS_UNABLE_HANDLE_STA_;
-
- goto OnAssocReqFail;
- }
- }
- }
- } else {
- int copy_len;
-
- if (psecuritypriv->wpa_psk == 0) {
-
- status = WLAN_STATUS_INVALID_IE;
-
- goto OnAssocReqFail;
- }
-
- if (elems.wps_ie) {
- pstat->flags |= WLAN_STA_WPS;
- copy_len = 0;
- } else {
- copy_len = ((wpa_ie_len + 2) > sizeof(pstat->wpa_ie)) ? (sizeof(pstat->wpa_ie)) : (wpa_ie_len + 2);
- }
- if (copy_len > 0)
- memcpy(pstat->wpa_ie, wpa_ie - 2, copy_len);
- }
- /* check if there is a WMM IE & support for WMM-PS */
- pstat->flags &= ~WLAN_STA_WME;
- pstat->qos_option = 0;
- pstat->qos_info = 0;
- pstat->has_legacy_ac = true;
- pstat->uapsd_vo = 0;
- pstat->uapsd_vi = 0;
- pstat->uapsd_be = 0;
- pstat->uapsd_bk = 0;
- if (pmlmepriv->qospriv.qos_option) {
- p = pframe + WLAN_HDR_A3_LEN + ie_offset; ie_len = 0;
- for (;;) {
- p = rtw_get_ie(p, _VENDOR_SPECIFIC_IE_, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
- if (p) {
- if (!memcmp(p + 2, WMM_IE, 6)) {
- pstat->flags |= WLAN_STA_WME;
-
- pstat->qos_option = 1;
- pstat->qos_info = *(p + 8);
-
- pstat->max_sp_len = (pstat->qos_info >> 5) & 0x3;
-
- if ((pstat->qos_info & 0xf) != 0xf)
- pstat->has_legacy_ac = true;
- else
- pstat->has_legacy_ac = false;
-
- if (pstat->qos_info & 0xf) {
- if (pstat->qos_info & BIT(0))
- pstat->uapsd_vo = BIT(0) | BIT(1);
- else
- pstat->uapsd_vo = 0;
-
- if (pstat->qos_info & BIT(1))
- pstat->uapsd_vi = BIT(0) | BIT(1);
- else
- pstat->uapsd_vi = 0;
-
- if (pstat->qos_info & BIT(2))
- pstat->uapsd_bk = BIT(0) | BIT(1);
- else
- pstat->uapsd_bk = 0;
-
- if (pstat->qos_info & BIT(3))
- pstat->uapsd_be = BIT(0) | BIT(1);
- else
- pstat->uapsd_be = 0;
- }
- break;
- }
- } else {
- break;
- }
- p = p + ie_len + 2;
- }
- }
-
- /* save HT capabilities in the sta object */
- memset(&pstat->htpriv.ht_cap, 0, sizeof(struct ieee80211_ht_cap));
- if (elems.ht_capabilities && elems.ht_capabilities_len >= sizeof(struct ieee80211_ht_cap)) {
- pstat->flags |= WLAN_STA_HT;
-
- pstat->flags |= WLAN_STA_WME;
-
- memcpy(&pstat->htpriv.ht_cap, elems.ht_capabilities, sizeof(struct ieee80211_ht_cap));
- } else {
- pstat->flags &= ~WLAN_STA_HT;
- }
- if ((!pmlmepriv->htpriv.ht_option) && (pstat->flags & WLAN_STA_HT)) {
- status = _STATS_FAILURE_;
- goto OnAssocReqFail;
- }
-
- pstat->flags |= WLAN_STA_NONERP;
- for (i = 0; i < pstat->bssratelen; i++) {
- if ((pstat->bssrateset[i] & 0x7f) > 22) {
- pstat->flags &= ~WLAN_STA_NONERP;
- break;
- }
- }
-
- if (pstat->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
- pstat->flags |= WLAN_STA_SHORT_PREAMBLE;
- else
- pstat->flags &= ~WLAN_STA_SHORT_PREAMBLE;
-
- if (status != _STATS_SUCCESSFUL_)
- goto OnAssocReqFail;
-
- pstat->is_p2p_device = false;
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
- p2pie = rtw_get_p2p_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, pkt_len - WLAN_HDR_A3_LEN - ie_offset, NULL, &p2pielen);
- if (p2pie) {
- pstat->is_p2p_device = true;
- p2p_status_code = (u8)process_assoc_req_p2p_ie(pwdinfo, pframe, pkt_len, pstat);
- if (p2p_status_code > 0) {
- pstat->p2p_status_code = p2p_status_code;
- status = _STATS_CAP_FAIL_;
- goto OnAssocReqFail;
- }
- }
- }
- pstat->p2p_status_code = p2p_status_code;
-
- /* TODO: identify_proprietary_vendor_ie(); */
- /* Realtek proprietary IE */
- /* identify if this is Broadcom sta */
- /* identify if this is ralink sta */
- /* Customer proprietary IE */
-
- /* get a unique AID */
- if (pstat->aid == 0) {
- for (pstat->aid = 1; pstat->aid <= NUM_STA; pstat->aid++)
- if (!pstapriv->sta_aid[pstat->aid - 1])
- break;
-
- /* if (pstat->aid > NUM_STA) { */
- if (pstat->aid > pstapriv->max_num_sta) {
- pstat->aid = 0;
-
- status = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
-
- goto OnAssocReqFail;
- } else {
- pstapriv->sta_aid[pstat->aid - 1] = pstat;
- }
- }
-
- pstat->state &= (~WIFI_FW_ASSOC_STATE);
- pstat->state |= WIFI_FW_ASSOC_SUCCESS;
-
- spin_lock_bh(&pstapriv->auth_list_lock);
- if (!list_empty(&pstat->auth_list)) {
- list_del_init(&pstat->auth_list);
- pstapriv->auth_list_cnt--;
- }
- spin_unlock_bh(&pstapriv->auth_list_lock);
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- if (list_empty(&pstat->asoc_list)) {
- pstat->expire_to = pstapriv->expire_to;
- list_add_tail(&pstat->asoc_list, &pstapriv->asoc_list);
- pstapriv->asoc_list_cnt++;
- }
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-
- /* now the station is qualified to join our BSS... */
- if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) && (status == _STATS_SUCCESSFUL_)) {
- /* 1 bss_cap_update & sta_info_update */
- bss_cap_update_on_sta_join(padapter, pstat);
- sta_info_update(padapter, pstat);
-
- /* issue assoc rsp before notifying the station join event. */
- if (frame_type == WIFI_ASSOCREQ)
- issue_asocrsp(padapter, status, pstat, WIFI_ASSOCRSP);
- else
- issue_asocrsp(padapter, status, pstat, WIFI_REASSOCRSP);
-
- /* 2 - report to upper layer */
- rtw_indicate_sta_assoc_event(padapter, pstat);
-
- /* 3-(1) report sta add event */
- report_add_sta_event(padapter, pstat->hwaddr, pstat->aid);
- }
-
- return;
-
-asoc_class2_error:
-
- issue_deauth(padapter, (void *)GetAddr2Ptr(pframe), status);
-
- return;
-
-OnAssocReqFail:
-
- pstat->aid = 0;
- if (frame_type == WIFI_ASSOCREQ)
- issue_asocrsp(padapter, status, pstat, WIFI_ASSOCRSP);
- else
- issue_asocrsp(padapter, status, pstat, WIFI_REASSOCRSP);
-
- return;
-}
-
-static void OnAssocRsp(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data;
- uint i;
- int res;
- struct ndis_802_11_var_ie *pIE;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- /* struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network); */
- u8 *pframe = precv_frame->rx_data;
- uint pkt_len = precv_frame->len;
-
- /* check A1 matches or not */
- if (memcmp(myid(&padapter->eeprompriv), mgmt->da, ETH_ALEN))
- return;
-
- if (!(pmlmeinfo->state & (WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE)))
- return;
-
- if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)
- return;
-
- _cancel_timer_ex(&pmlmeext->link_timer);
-
- if (le16_to_cpu(mgmt->u.assoc_resp.status_code) > 0) {
- pmlmeinfo->state = WIFI_FW_NULL_STATE;
- res = -4;
- goto report_assoc_result;
- }
-
- pmlmeinfo->capability = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
-
- /* set slot time */
- pmlmeinfo->slotTime = (pmlmeinfo->capability & BIT(10)) ? 9 : 20;
-
- pmlmeinfo->aid = le16_to_cpu(mgmt->u.assoc_resp.aid) & 0x3fff;
- res = pmlmeinfo->aid;
-
- /* the following are moved to the join event callback function */
- /* to handle HT, WMM, rate adaptive, and MAC reg updates, */
- /* so as not to do synchronous IO in the tasklet */
- for (i = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable); i < pkt_len;) {
- pIE = (struct ndis_802_11_var_ie *)(pframe + i);
-
- switch (pIE->ElementID) {
- case _VENDOR_SPECIFIC_IE_:
- if (!memcmp(pIE->data, WMM_PARA_OUI, 6)) /* WMM */
- WMM_param_handler(padapter, pIE);
- break;
- case _HT_CAPABILITY_IE_: /* HT caps */
- HT_caps_handler(padapter, pIE);
- break;
- case _HT_EXTRA_INFO_IE_: /* HT info */
- HT_info_handler(padapter, pIE);
- break;
- case _ERPINFO_IE_:
- ERP_IE_handler(padapter, pIE);
- break;
- default:
- break;
- }
-
- i += (pIE->Length + 2);
- }
-
- pmlmeinfo->state &= (~WIFI_FW_ASSOC_STATE);
- pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
-
- /* Update Basic Rate Table for spec, 2010-12-28 , by thomas */
- UpdateBrateTbl(pmlmeinfo->network.SupportedRates);
-
-report_assoc_result:
- report_join_res(padapter, res);
-}
-
-static void OnDeAuth(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data;
- unsigned short reason;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- if (memcmp(mgmt->bssid, get_my_bssid(&pmlmeinfo->network), ETH_ALEN))
- return;
-
- if (pwdinfo->rx_invitereq_info.scan_op_ch_only) {
- _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey);
- _set_timer(&pwdinfo->reset_ch_sitesurvey, 10);
- }
-
- reason = le16_to_cpu(mgmt->u.disassoc.reason_code);
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- struct sta_info *psta;
- struct sta_priv *pstapriv = &padapter->stapriv;
- u8 updated = 0;
-
- psta = rtw_get_stainfo(pstapriv, mgmt->sa);
- if (!psta)
- return;
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- if (!list_empty(&psta->asoc_list)) {
- list_del_init(&psta->asoc_list);
- pstapriv->asoc_list_cnt--;
- updated = ap_free_sta(padapter, psta, false, reason);
- }
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-
- associated_clients_update(padapter, updated);
- } else {
- bool ignore_received_deauth = false;
-
- /* Before sending the auth frame to start the STA/GC mode connection with AP/GO,
- * we will send the deauth first.
- * However, Win8.1 with BRCM Wi-Fi will send a deauth with reason code 6 to us after receiving our deauth.
- * Added the following code to avoid this case.
- */
- if (pmlmeinfo->state & (WIFI_FW_AUTH_STATE | WIFI_FW_ASSOC_STATE)) {
- if (reason == WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA) {
- ignore_received_deauth = true;
- } else if (reason == WLAN_REASON_PREV_AUTH_NOT_VALID) {
- // TODO: 802.11r
- ignore_received_deauth = true;
- }
- }
-
- if (!ignore_received_deauth)
- receive_disconnect(padapter, mgmt->bssid, reason);
-
- pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
- }
-}
-
-static void OnDisassoc(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data;
- u16 reason;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- struct sta_info *psta;
- struct sta_priv *pstapriv = &padapter->stapriv;
- u8 updated = 0;
-
- if (memcmp(mgmt->bssid, get_my_bssid(&pmlmeinfo->network), ETH_ALEN))
- return;
-
- if (pwdinfo->rx_invitereq_info.scan_op_ch_only) {
- _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey);
- _set_timer(&pwdinfo->reset_ch_sitesurvey, 10);
- }
-
- reason = le16_to_cpu(mgmt->u.disassoc.reason_code);
-
- if (!check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- receive_disconnect(padapter, mgmt->bssid, reason);
- pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
- return;
- }
-
- psta = rtw_get_stainfo(pstapriv, mgmt->sa);
- if (!psta)
- return;
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- if (!list_empty(&psta->asoc_list)) {
- list_del_init(&psta->asoc_list);
- pstapriv->asoc_list_cnt--;
- updated = ap_free_sta(padapter, psta, false, reason);
- }
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-
- associated_clients_update(padapter, updated);
-}
-
-static void OnAction_back(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data;
- struct sta_info *psta = NULL;
- struct recv_reorder_ctrl *preorder_ctrl;
- unsigned short tid;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if ((pmlmeinfo->state & 0x03) != WIFI_FW_AP_STATE)
- if (!(pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS))
- return;
-
- psta = rtw_get_stainfo(pstapriv, mgmt->sa);
- if (!psta)
- return;
-
- if (!pmlmeinfo->HT_enable)
- return;
- /* All union members start with an action code, so it's ok to use addba_req. */
- switch (mgmt->u.action.u.addba_req.action_code) {
- case WLAN_ACTION_ADDBA_REQ:
- tid = u16_get_bits(le16_to_cpu(mgmt->u.action.u.addba_req.capab),
- IEEE80211_ADDBA_PARAM_TID_MASK);
- preorder_ctrl = &psta->recvreorder_ctrl[tid];
- preorder_ctrl->indicate_seq = 0xffff;
- preorder_ctrl->enable = pmlmeinfo->bAcceptAddbaReq;
- issue_action_BA(padapter, mgmt->sa, WLAN_ACTION_ADDBA_RESP,
- pmlmeinfo->bAcceptAddbaReq ?
- WLAN_STATUS_SUCCESS : WLAN_STATUS_REQUEST_DECLINED, mgmt);
- break;
- case WLAN_ACTION_ADDBA_RESP:
- tid = u16_get_bits(le16_to_cpu(mgmt->u.action.u.addba_resp.capab),
- IEEE80211_ADDBA_PARAM_TID_MASK);
- if (mgmt->u.action.u.addba_resp.status == 0) { /* successful */
- psta->htpriv.agg_enable_bitmap |= BIT(tid);
- psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
- } else {
- psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
- }
- break;
- case WLAN_ACTION_DELBA:
- tid = u16_get_bits(le16_to_cpu(mgmt->u.action.u.delba.params),
- IEEE80211_DELBA_PARAM_TID_MASK);
- if (u16_get_bits(le16_to_cpu(mgmt->u.action.u.delba.params),
- IEEE80211_DELBA_PARAM_INITIATOR_MASK) == WLAN_BACK_RECIPIENT) {
- psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
- psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
- } else {
- preorder_ctrl = &psta->recvreorder_ctrl[tid];
- preorder_ctrl->enable = false;
- preorder_ctrl->indicate_seq = 0xffff;
- }
- /* todo: how to notify the host while receiving DELETE BA */
- break;
- default:
- break;
- }
-}
-
-static int get_reg_classes_full_count(struct p2p_channels *channel_list)
-{
- int cnt = 0;
- int i;
-
- for (i = 0; i < channel_list->reg_classes; i++) {
- cnt += channel_list->reg_class[i].channels;
- }
-
- return cnt;
-}
-
-void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
-{
- unsigned char category = WLAN_CATEGORY_PUBLIC;
- u8 action = P2P_PUB_ACTION_ACTION;
- __be32 p2poui = cpu_to_be32(P2POUI);
- u8 oui_subtype = P2P_GO_NEGO_REQ;
- u8 wpsie[255] = { 0x00 }, p2pie[255] = { 0x00 };
- u8 wpsielen = 0, p2pielen = 0;
- u16 len_channellist_attr = 0;
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
-
- memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, myid(&padapter->eeprompriv), ETH_ALEN);
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_ACTION);
-
- pframe += sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
-
- pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&p2poui, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &oui_subtype, &pattrib->pktlen);
- pwdinfo->negotiation_dialog_token = 1; /* Initialize the dialog value */
- pframe = rtw_set_fixed_ie(pframe, 1, &pwdinfo->negotiation_dialog_token, &pattrib->pktlen);
-
- /* WPS Section */
- wpsielen = 0;
- /* WPS OUI */
- *(__be32 *)(wpsie) = cpu_to_be32(WPSOUI);
- wpsielen += 4;
-
- /* WPS version */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_VER1);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
- wpsielen += 2;
-
- /* Value: */
- wpsie[wpsielen++] = WPS_VERSION_1; /* Version 1.0 */
-
- /* Device Password ID */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_DEVICE_PWID);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0002);
- wpsielen += 2;
-
- /* Value: */
-
- if (pwdinfo->ui_got_wps_info == P2P_GOT_WPSINFO_PEER_DISPLAY_PIN)
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_DPID_USER_SPEC);
- else if (pwdinfo->ui_got_wps_info == P2P_GOT_WPSINFO_SELF_DISPLAY_PIN)
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_DPID_REGISTRAR_SPEC);
- else if (pwdinfo->ui_got_wps_info == P2P_GOT_WPSINFO_PBC)
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_DPID_PBC);
-
- wpsielen += 2;
-
- pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, wpsielen, (unsigned char *)wpsie, &pattrib->pktlen);
-
- /* P2P IE Section. */
-
- /* P2P OUI */
- p2pielen = 0;
- p2pie[p2pielen++] = 0x50;
- p2pie[p2pielen++] = 0x6F;
- p2pie[p2pielen++] = 0x9A;
- p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
-
- /* Commented by Albert 20110306 */
- /* According to the P2P Specification, the group negotiation request frame should contain 9 P2P attributes */
- /* 1. P2P Capability */
- /* 2. Group Owner Intent */
- /* 3. Configuration Timeout */
- /* 4. Listen Channel */
- /* 5. Extended Listen Timing */
- /* 6. Intended P2P Interface Address */
- /* 7. Channel List */
- /* 8. P2P Device Info */
- /* 9. Operating Channel */
-
- /* P2P Capability */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
- p2pielen += 2;
-
- /* Value: */
- /* Device Capability Bitmap, 1 byte */
- p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
-
- /* Group Capability Bitmap, 1 byte */
- if (pwdinfo->persistent_supported)
- p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN | P2P_GRPCAP_PERSISTENT_GROUP;
- else
- p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN;
-
- /* Group Owner Intent */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_GO_INTENT;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0001);
- p2pielen += 2;
-
- /* Value: */
- /* Todo the tie breaker bit. */
- p2pie[p2pielen++] = ((pwdinfo->intent << 1) | BIT(0));
-
- /* Configuration Timeout */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_CONF_TIMEOUT;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
- p2pielen += 2;
-
- /* Value: */
- p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P GO */
- p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P Client */
-
- /* Listen Channel */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_LISTEN_CH;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
- p2pielen += 2;
-
- /* Value: */
- /* Country String */
- p2pie[p2pielen++] = 'X';
- p2pie[p2pielen++] = 'X';
-
- /* The third byte should be set to 0x04. */
- /* Described in the "Operating Channel Attribute" section. */
- p2pie[p2pielen++] = 0x04;
-
- /* Operating Class */
- p2pie[p2pielen++] = 0x51; /* Copy from SD7 */
-
- /* Channel Number */
- p2pie[p2pielen++] = pwdinfo->listen_channel; /* listening channel number */
-
- /* Extended Listen Timing ATTR */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_EX_LISTEN_TIMING;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0004);
- p2pielen += 2;
-
- /* Value: */
- /* Availability Period */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0xFFFF);
- p2pielen += 2;
-
- /* Availability Interval */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0xFFFF);
- p2pielen += 2;
-
- /* Intended P2P Interface Address */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_INTENTED_IF_ADDR;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(ETH_ALEN);
- p2pielen += 2;
-
- /* Value: */
- memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
- p2pielen += ETH_ALEN;
-
- /* Channel List */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_CH_LIST;
-
- /* Length: */
- /* Country String(3) */
- /* + (Operating Class (1) + Number of Channels(1)) * Operation Classes (?) */
- /* + number of channels in all classes */
- len_channellist_attr = 3
- + (1 + 1) * (u16)(pmlmeext->channel_list.reg_classes)
- + get_reg_classes_full_count(&pmlmeext->channel_list);
-
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
- p2pielen += 2;
-
- /* Value: */
- /* Country String */
- p2pie[p2pielen++] = 'X';
- p2pie[p2pielen++] = 'X';
-
- /* The third byte should be set to 0x04. */
- /* Described in the "Operating Channel Attribute" section. */
- p2pie[p2pielen++] = 0x04;
-
- /* Channel Entry List */
-
- {
- int i, j;
- for (j = 0; j < pmlmeext->channel_list.reg_classes; j++) {
- /* Operating Class */
- p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].reg_class;
-
- /* Number of Channels */
- p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channels;
-
- /* Channel List */
- for (i = 0; i < pmlmeext->channel_list.reg_class[j].channels; i++) {
- p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channel[i];
- }
- }
- }
-
- /* Device Info */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
-
- /* Length: */
- /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) + Primary Device Type (8bytes) */
- /* + NumofSecondDevType (1byte) + WPS Device Name ID field (2bytes) + WPS Device Name Len field (2bytes) */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len);
- p2pielen += 2;
-
- /* Value: */
- /* P2P Device Address */
- memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
- p2pielen += ETH_ALEN;
-
- /* Config Method */
- /* This field should be big endian, as noted in the P2P specification. */
-
- *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(pwdinfo->supported_wps_cm);
-
- p2pielen += 2;
-
- /* Primary Device Type */
- /* Category ID */
- *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
- p2pielen += 2;
-
- /* OUI */
- *(__be32 *)(p2pie + p2pielen) = cpu_to_be32(WPSOUI);
- p2pielen += 4;
-
- /* Sub Category ID */
- *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
- p2pielen += 2;
-
- /* Number of Secondary Device Types */
- p2pie[p2pielen++] = 0x00; /* No Secondary Device Type List */
-
- /* Device Name */
- /* Type: */
- *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
- p2pielen += 2;
-
- /* Length: */
- *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len);
- p2pielen += 2;
-
- /* Value: */
- memcpy(p2pie + p2pielen, pwdinfo->device_name, pwdinfo->device_name_len);
- p2pielen += pwdinfo->device_name_len;
-
- /* Operating Channel */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
- p2pielen += 2;
-
- /* Value: */
- /* Country String */
- p2pie[p2pielen++] = 'X';
- p2pie[p2pielen++] = 'X';
-
- /* The third byte should be set to 0x04. */
- /* Described in the "Operating Channel Attribute" section. */
- p2pie[p2pielen++] = 0x04;
-
- /* Operating Class */
- p2pie[p2pielen++] = 0x51;
-
- /* Channel Number */
- p2pie[p2pielen++] = pwdinfo->operating_channel; /* operating channel number */
-
- pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &pattrib->pktlen);
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- dump_mgntframe(padapter, pmgntframe);
-}
-
-static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame_body, uint len, u8 result)
-{
- unsigned char category = WLAN_CATEGORY_PUBLIC;
- u8 action = P2P_PUB_ACTION_ACTION;
- __be32 p2poui = cpu_to_be32(P2POUI);
- u8 oui_subtype = P2P_GO_NEGO_RESP;
- u8 wpsie[255] = { 0x00 }, p2pie[255] = { 0x00 };
- u8 p2pielen = 0;
- uint wpsielen = 0;
- u16 wps_devicepassword_id = 0x0000;
- __be16 be_tmp;
- uint wps_devicepassword_id_len = 0;
- u16 len_channellist_attr = 0;
-
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
-
- memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, myid(&padapter->eeprompriv), ETH_ALEN);
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_ACTION);
-
- pframe += sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
-
- pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&p2poui, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &oui_subtype, &pattrib->pktlen);
- pwdinfo->negotiation_dialog_token = frame_body[7]; /* The Dialog Token of the received group negotiation request frame. */
- pframe = rtw_set_fixed_ie(pframe, 1, &pwdinfo->negotiation_dialog_token, &pattrib->pktlen);
-
- /* Commented by Albert 20110328 */
- /* Try to get the device password ID from the WPS IE of the group negotiation request frame */
- /* WiFi Direct test plan 5.1.15 */
- rtw_get_wps_ie(frame_body + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, wpsie, &wpsielen);
- rtw_get_wps_attr_content(wpsie, wpsielen, WPS_ATTR_DEVICE_PWID, (u8 *)&be_tmp, &wps_devicepassword_id_len);
- wps_devicepassword_id = be16_to_cpu(be_tmp);
-
- memset(wpsie, 0x00, 255);
- wpsielen = 0;
-
- /* WPS Section */
- wpsielen = 0;
- /* WPS OUI */
- *(__be32 *)(wpsie) = cpu_to_be32(WPSOUI);
- wpsielen += 4;
-
- /* WPS version */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_VER1);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
- wpsielen += 2;
-
- /* Value: */
- wpsie[wpsielen++] = WPS_VERSION_1; /* Version 1.0 */
-
- /* Device Password ID */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_DEVICE_PWID);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0002);
- wpsielen += 2;
-
- /* Value: */
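- /* Answer with the complementary device password ID: peer user-specified PIN -> registrar-specified, */
- /* peer registrar-specified -> user-specified; anything else falls back to PBC. */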
- if (wps_devicepassword_id == WPS_DPID_USER_SPEC)
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_DPID_REGISTRAR_SPEC);
- else if (wps_devicepassword_id == WPS_DPID_REGISTRAR_SPEC)
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_DPID_USER_SPEC);
- else
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_DPID_PBC);
- wpsielen += 2;
-
- /* Commented by Kurt 20120113 */
- /* If the peer wants to do the P2P handshake without sending prov_disc_req, */
- /* we have to derive peer_req_cm from the device password ID here. */
- if (!memcmp(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "000", 3)) {
- if (wps_devicepassword_id == WPS_DPID_USER_SPEC)
- memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "dis", 3);
- else if (wps_devicepassword_id == WPS_DPID_REGISTRAR_SPEC)
- memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "pad", 3);
- else
- memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "pbc", 3);
- }
-
- pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, wpsielen, (unsigned char *)wpsie, &pattrib->pktlen);
-
- /* P2P IE Section. */
-
- /* P2P OUI */
- p2pielen = 0;
- p2pie[p2pielen++] = 0x50;
- p2pie[p2pielen++] = 0x6F;
- p2pie[p2pielen++] = 0x9A;
- p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
-
- /* Commented by Albert 20100908 */
- /* According to the P2P Specification, the group negotiation response frame should contain 9 P2P attributes */
- /* 1. Status */
- /* 2. P2P Capability */
- /* 3. Group Owner Intent */
- /* 4. Configuration Timeout */
- /* 5. Operating Channel */
- /* 6. Intended P2P Interface Address */
- /* 7. Channel List */
- /* 8. Device Info */
- /* 9. Group ID (Only GO) */
-
- /* ToDo: */
-
- /* P2P Status */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_STATUS;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0001);
- p2pielen += 2;
-
- /* Value: */
- p2pie[p2pielen++] = result;
-
- /* P2P Capability */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
- p2pielen += 2;
-
- /* Value: */
- /* Device Capability Bitmap, 1 byte */
-
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT)) {
- /* Commented by Albert 2011/03/08 */
- /* According to the P2P specification, */
- /* if the sending device will act as a client, the Device Capability field of the group negotiation response frame should be reserved (set to 0). */
- p2pie[p2pielen++] = 0;
- } else {
- /* Be group owner or meet the error case */
- p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
- }
-
- /* Group Capability Bitmap, 1 byte */
- if (pwdinfo->persistent_supported) {
- p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN | P2P_GRPCAP_PERSISTENT_GROUP;
- } else {
- p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN;
- }
-
- /* Group Owner Intent */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_GO_INTENT;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0001);
- p2pielen += 2;
-
- /* Value: */
- if (pwdinfo->peer_intent & 0x01) {
- /* Peer's tie breaker bit is 1, our tie breaker bit should be 0 */
- p2pie[p2pielen++] = (pwdinfo->intent << 1);
- } else {
- /* Peer's tie breaker bit is 0, our tie breaker bit should be 1 */
- p2pie[p2pielen++] = ((pwdinfo->intent << 1) | BIT(0));
- }
-
- /* Configuration Timeout */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_CONF_TIMEOUT;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
- p2pielen += 2;
-
- /* Value: */
- p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P GO */
- p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P Client */
-
- /* Operating Channel */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
- p2pielen += 2;
-
- /* Value: */
- /* Country String */
- p2pie[p2pielen++] = 'X';
- p2pie[p2pielen++] = 'X';
-
- /* The third byte should be set to 0x04. */
- /* Described in the "Operating Channel Attribute" section. */
- p2pie[p2pielen++] = 0x04;
-
- /* Operating Class */
- p2pie[p2pielen++] = 0x51;
-
- /* Channel Number */
- p2pie[p2pielen++] = pwdinfo->operating_channel; /* operating channel number */
-
- /* Intended P2P Interface Address */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_INTENTED_IF_ADDR;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(ETH_ALEN);
- p2pielen += 2;
-
- /* Value: */
- memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
- p2pielen += ETH_ALEN;
-
- /* Channel List */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_CH_LIST;
-
- /* Country String(3) */
- /* + (Operating Class (1) + Number of Channels(1)) * Operation Classes (?) */
- /* + number of channels in all classes */
- len_channellist_attr = 3
- + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
- + get_reg_classes_full_count(&pmlmeext->channel_list);
-
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
-
- p2pielen += 2;
-
- /* Value: */
- /* Country String */
- p2pie[p2pielen++] = 'X';
- p2pie[p2pielen++] = 'X';
-
- /* The third byte should be set to 0x04. */
- /* Described in the "Operating Channel Attribute" section. */
- p2pie[p2pielen++] = 0x04;
-
- /* Channel Entry List */
-
- {
- int i, j;
- for (j = 0; j < pmlmeext->channel_list.reg_classes; j++) {
- /* Operating Class */
- p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].reg_class;
-
- /* Number of Channels */
- p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channels;
-
- /* Channel List */
- for (i = 0; i < pmlmeext->channel_list.reg_class[j].channels; i++) {
- p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channel[i];
- }
- }
- }
-
- /* Device Info */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
-
- /* Length: */
- /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) + Primary Device Type (8bytes) */
- /* + NumofSecondDevType (1byte) + WPS Device Name ID field (2bytes) + WPS Device Name Len field (2bytes) */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len);
- p2pielen += 2;
-
- /* Value: */
- /* P2P Device Address */
- memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
- p2pielen += ETH_ALEN;
-
- /* Config Method */
- /* This field should be big endian, as noted in the P2P specification. */
-
- *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(pwdinfo->supported_wps_cm);
-
- p2pielen += 2;
-
- /* Primary Device Type */
- /* Category ID */
- *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
- p2pielen += 2;
-
- /* OUI */
- *(__be32 *)(p2pie + p2pielen) = cpu_to_be32(WPSOUI);
- p2pielen += 4;
-
- /* Sub Category ID */
- *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
- p2pielen += 2;
-
- /* Number of Secondary Device Types */
- p2pie[p2pielen++] = 0x00; /* No Secondary Device Type List */
-
- /* Device Name */
- /* Type: */
- *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
- p2pielen += 2;
-
- /* Length: */
- *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len);
- p2pielen += 2;
-
- /* Value: */
- memcpy(p2pie + p2pielen, pwdinfo->device_name, pwdinfo->device_name_len);
- p2pielen += pwdinfo->device_name_len;
-
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
- /* Group ID Attribute */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_GROUP_ID;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(ETH_ALEN + pwdinfo->nego_ssidlen);
- p2pielen += 2;
-
- /* Value: */
- /* p2P Device Address */
- memcpy(p2pie + p2pielen, pwdinfo->device_addr, ETH_ALEN);
- p2pielen += ETH_ALEN;
-
- /* SSID */
- memcpy(p2pie + p2pielen, pwdinfo->nego_ssid, pwdinfo->nego_ssidlen);
- p2pielen += pwdinfo->nego_ssidlen;
- }
-
- pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &pattrib->pktlen);
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- dump_mgntframe(padapter, pmgntframe);
-}
-
-static void issue_p2p_GO_confirm(struct adapter *padapter, u8 *raddr, u8 result)
-{
- unsigned char category = WLAN_CATEGORY_PUBLIC;
- u8 action = P2P_PUB_ACTION_ACTION;
- __be32 p2poui = cpu_to_be32(P2POUI);
- u8 oui_subtype = P2P_GO_NEGO_CONF;
- u8 p2pie[255] = { 0x00 };
- u8 p2pielen = 0;
-
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
-
- memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, myid(&padapter->eeprompriv), ETH_ALEN);
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_ACTION);
-
- pframe += sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
-
- pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&p2poui, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &oui_subtype, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &pwdinfo->negotiation_dialog_token, &pattrib->pktlen);
-
- /* P2P IE Section. */
-
- /* P2P OUI */
- p2pielen = 0;
- p2pie[p2pielen++] = 0x50;
- p2pie[p2pielen++] = 0x6F;
- p2pie[p2pielen++] = 0x9A;
- p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
-
- /* Commented by Albert 20110306 */
- /* According to the P2P Specification, the group negotiation confirmation frame should contain 5 P2P attributes */
- /* 1. Status */
- /* 2. P2P Capability */
- /* 3. Operating Channel */
- /* 4. Channel List */
- /* 5. Group ID (only if this device is the GO) */
-
- /* P2P Status */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_STATUS;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0001);
- p2pielen += 2;
-
- /* Value: */
- p2pie[p2pielen++] = result;
-
- /* P2P Capability */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
- p2pielen += 2;
-
- /* Value: */
- /* Device Capability Bitmap, 1 byte */
- p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
-
- /* Group Capability Bitmap, 1 byte */
- if (pwdinfo->persistent_supported)
- p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN | P2P_GRPCAP_PERSISTENT_GROUP;
- else
- p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN;
-
- /* Operating Channel */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
- p2pielen += 2;
-
- /* Value: */
- /* Country String */
- p2pie[p2pielen++] = 'X';
- p2pie[p2pielen++] = 'X';
-
- /* The third byte should be set to 0x04. */
- /* Described in the "Operating Channel Attribute" section. */
- p2pie[p2pielen++] = 0x04;
-
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT)) {
- /* Operating Class */
- p2pie[p2pielen++] = 0x51;
- p2pie[p2pielen++] = pwdinfo->peer_operating_ch;
- } else {
- /* Operating Class */
- p2pie[p2pielen++] = 0x51;
-
- /* Channel Number */
- p2pie[p2pielen++] = pwdinfo->operating_channel; /* operating channel number */
- }
-
- /* Channel List */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_CH_LIST;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(pwdinfo->channel_list_attr_len);
- p2pielen += 2;
-
- /* Value: */
- memcpy(p2pie + p2pielen, pwdinfo->channel_list_attr, pwdinfo->channel_list_attr_len);
- p2pielen += pwdinfo->channel_list_attr_len;
-
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
- /* Group ID Attribute */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_GROUP_ID;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(ETH_ALEN + pwdinfo->nego_ssidlen);
- p2pielen += 2;
-
- /* Value: */
- /* p2P Device Address */
- memcpy(p2pie + p2pielen, pwdinfo->device_addr, ETH_ALEN);
- p2pielen += ETH_ALEN;
-
- /* SSID */
- memcpy(p2pie + p2pielen, pwdinfo->nego_ssid, pwdinfo->nego_ssidlen);
- p2pielen += pwdinfo->nego_ssidlen;
- }
- pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &pattrib->pktlen);
- pattrib->last_txcmdsz = pattrib->pktlen;
- dump_mgntframe(padapter, pmgntframe);
-}
-
-void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
-{
- unsigned char category = WLAN_CATEGORY_PUBLIC;
- u8 action = P2P_PUB_ACTION_ACTION;
- __be32 p2poui = cpu_to_be32(P2POUI);
- u8 oui_subtype = P2P_INVIT_REQ;
- u8 p2pie[255] = { 0x00 };
- u8 p2pielen = 0;
- u8 dialogToken = 3;
- u16 len_channellist_attr = 0;
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
-
- memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, raddr, ETH_ALEN);
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_ACTION);
-
- pframe += sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
-
- pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&p2poui, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &oui_subtype, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &dialogToken, &pattrib->pktlen);
-
- /* P2P IE Section. */
-
- /* P2P OUI */
- p2pielen = 0;
- p2pie[p2pielen++] = 0x50;
- p2pie[p2pielen++] = 0x6F;
- p2pie[p2pielen++] = 0x9A;
- p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
-
- /* Commented by Albert 20101011 */
- /* According to the P2P Specification, the P2P Invitation request frame should contain 7 P2P attributes */
- /* 1. Configuration Timeout */
- /* 2. Invitation Flags */
- /* 3. Operating Channel (Only GO) */
- /* 4. P2P Group BSSID (Should be included if I am the GO) */
- /* 5. Channel List */
- /* 6. P2P Group ID */
- /* 7. P2P Device Info */
-
- /* Configuration Timeout */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_CONF_TIMEOUT;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
- p2pielen += 2;
-
- /* Value: */
- p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P GO */
- p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P Client */
-
- /* Invitation Flags */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_INVITATION_FLAGS;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0001);
- p2pielen += 2;
-
- /* Value: */
- p2pie[p2pielen++] = P2P_INVITATION_FLAGS_PERSISTENT;
-
- /* Operating Channel */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
- p2pielen += 2;
-
- /* Value: */
- /* Country String */
- p2pie[p2pielen++] = 'X';
- p2pie[p2pielen++] = 'X';
-
- /* The third byte should be set to 0x04. */
- /* Described in the "Operating Channel Attribute" section. */
- p2pie[p2pielen++] = 0x04;
-
- /* Operating Class */
- p2pie[p2pielen++] = 0x51;
-
- /* Channel Number */
- p2pie[p2pielen++] = pwdinfo->invitereq_info.operating_ch; /* operating channel number */
-
- if (!memcmp(myid(&padapter->eeprompriv), pwdinfo->invitereq_info.go_bssid, ETH_ALEN)) {
- /* P2P Group BSSID */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_GROUP_BSSID;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(ETH_ALEN);
- p2pielen += 2;
-
- /* Value: */
- /* P2P Device Address for GO */
- memcpy(p2pie + p2pielen, pwdinfo->invitereq_info.go_bssid, ETH_ALEN);
- p2pielen += ETH_ALEN;
- }
-
- /* Channel List */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_CH_LIST;
-
- /* Length: */
- /* Country String(3) */
- /* + (Operating Class (1) + Number of Channels(1)) * Operation Classes (?) */
- /* + number of channels in all classes */
- len_channellist_attr = 3
- + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
- + get_reg_classes_full_count(&pmlmeext->channel_list);
-
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
-
- p2pielen += 2;
-
- /* Value: */
- /* Country String */
- p2pie[p2pielen++] = 'X';
- p2pie[p2pielen++] = 'X';
-
- /* The third byte should be set to 0x04. */
- /* Described in the "Operating Channel Attribute" section. */
- p2pie[p2pielen++] = 0x04;
-
- /* Channel Entry List */
- {
- int i, j;
- for (j = 0; j < pmlmeext->channel_list.reg_classes; j++) {
- /* Operating Class */
- p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].reg_class;
-
- /* Number of Channels */
- p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channels;
-
- /* Channel List */
- for (i = 0; i < pmlmeext->channel_list.reg_class[j].channels; i++) {
- p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channel[i];
- }
- }
- }
-
- /* P2P Group ID */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_GROUP_ID;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(6 + pwdinfo->invitereq_info.ssidlen);
- p2pielen += 2;
-
- /* Value: */
- /* P2P Device Address for GO */
- memcpy(p2pie + p2pielen, pwdinfo->invitereq_info.go_bssid, ETH_ALEN);
- p2pielen += ETH_ALEN;
-
- /* SSID */
- memcpy(p2pie + p2pielen, pwdinfo->invitereq_info.go_ssid, pwdinfo->invitereq_info.ssidlen);
- p2pielen += pwdinfo->invitereq_info.ssidlen;
-
- /* Device Info */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
-
- /* Length: */
- /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) + Primary Device Type (8bytes) */
- /* + NumofSecondDevType (1byte) + WPS Device Name ID field (2bytes) + WPS Device Name Len field (2bytes) */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len);
- p2pielen += 2;
-
- /* Value: */
- /* P2P Device Address */
- memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
- p2pielen += ETH_ALEN;
-
- /* Config Method */
- /* This field should be big endian, as noted in the P2P specification. */
- *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_CONFIG_METHOD_DISPLAY);
- p2pielen += 2;
-
- /* Primary Device Type */
- /* Category ID */
- *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
- p2pielen += 2;
-
- /* OUI */
- *(__be32 *)(p2pie + p2pielen) = cpu_to_be32(WPSOUI);
- p2pielen += 4;
-
- /* Sub Category ID */
- *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
- p2pielen += 2;
-
- /* Number of Secondary Device Types */
- p2pie[p2pielen++] = 0x00; /* No Secondary Device Type List */
-
- /* Device Name */
- /* Type: */
- *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
- p2pielen += 2;
-
- /* Length: */
- *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len);
- p2pielen += 2;
-
- /* Value: */
- memcpy(p2pie + p2pielen, pwdinfo->device_name, pwdinfo->device_name_len);
- p2pielen += pwdinfo->device_name_len;
-
- pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &pattrib->pktlen);
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- dump_mgntframe(padapter, pmgntframe);
-}
-
-void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialogToken, u8 status_code)
-{
- unsigned char category = WLAN_CATEGORY_PUBLIC;
- u8 action = P2P_PUB_ACTION_ACTION;
- __be32 p2poui = cpu_to_be32(P2POUI);
- u8 oui_subtype = P2P_INVIT_RESP;
- u8 p2pie[255] = { 0x00 };
- u8 p2pielen = 0;
- u16 len_channellist_attr = 0;
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
-
- memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, raddr, ETH_ALEN);
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_ACTION);
-
- pframe += sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
-
- pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&p2poui, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &oui_subtype, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &dialogToken, &pattrib->pktlen);
-
- /* P2P IE Section. */
-
- /* P2P OUI */
- p2pielen = 0;
- p2pie[p2pielen++] = 0x50;
- p2pie[p2pielen++] = 0x6F;
- p2pie[p2pielen++] = 0x9A;
- p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
-
- /* Commented by Albert 20101005 */
- /* According to the P2P Specification, the P2P Invitation response frame should contain 5 P2P attributes */
- /* 1. Status */
- /* 2. Configuration Timeout */
- /* 3. Operating Channel (Only GO) */
- /* 4. P2P Group BSSID (Only GO) */
- /* 5. Channel List */
-
- /* P2P Status */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_STATUS;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0001);
- p2pielen += 2;
-
- /* Value: */
- /* When the status code is P2P_STATUS_FAIL_INFO_UNAVAILABLE: */
- /* the driver sends the event of receiving the P2P Invitation Req frame to the DMP UI. */
- /* The DMP UI has to compare the MAC address to find the matching profile. */
- /* So the WiFi driver reports P2P_STATUS_FAIL_INFO_UNAVAILABLE to the NB. */
- /* If the UI finds the corresponding profile, the WiFi driver sends the P2P Invitation Req */
- /* to the NB to rebuild the persistent group. */
- p2pie[p2pielen++] = status_code;
-
- /* Configuration Timeout */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_CONF_TIMEOUT;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
- p2pielen += 2;
-
- /* Value: */
- p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P GO */
- p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P Client */
-
- if (status_code == P2P_STATUS_SUCCESS) {
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
- /* The P2P Invitation request frame asks this Wi-Fi device to be the P2P GO */
- /* In this case, the P2P Invitation response frame should carry the two more P2P attributes. */
- /* First one is operating channel attribute. */
- /* Second one is P2P Group BSSID attribute. */
-
- /* Operating Channel */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
- p2pielen += 2;
-
- /* Value: */
- /* Country String */
- p2pie[p2pielen++] = 'X';
- p2pie[p2pielen++] = 'X';
-
- /* The third byte should be set to 0x04. */
- /* Described in the "Operating Channel Attribute" section. */
- p2pie[p2pielen++] = 0x04;
-
- /* Operating Class */
- p2pie[p2pielen++] = 0x51; /* Copy from SD7 */
-
- /* Channel Number */
- p2pie[p2pielen++] = pwdinfo->operating_channel; /* operating channel number */
-
- /* P2P Group BSSID */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_GROUP_BSSID;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(ETH_ALEN);
- p2pielen += 2;
-
- /* Value: */
- /* P2P Device Address for GO */
- memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
- p2pielen += ETH_ALEN;
- }
-
- /* Channel List */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_CH_LIST;
-
- /* Length: */
- /* Country String(3) */
- /* + (Operating Class (1) + Number of Channels(1)) * Operation Classes (?) */
- /* + number of channels in all classes */
- len_channellist_attr = 3
- + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
- + get_reg_classes_full_count(&pmlmeext->channel_list);
-
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
- p2pielen += 2;
-
- /* Value: */
- /* Country String */
- p2pie[p2pielen++] = 'X';
- p2pie[p2pielen++] = 'X';
-
- /* The third byte should be set to 0x04. */
- /* Described in the "Operating Channel Attribute" section. */
- p2pie[p2pielen++] = 0x04;
-
- /* Channel Entry List */
- {
- int i, j;
- for (j = 0; j < pmlmeext->channel_list.reg_classes; j++) {
- /* Operating Class */
- p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].reg_class;
-
- /* Number of Channels */
- p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channels;
-
- /* Channel List */
- for (i = 0; i < pmlmeext->channel_list.reg_class[j].channels; i++) {
- p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channel[i];
- }
- }
- }
- }
-
- pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &pattrib->pktlen);
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- dump_mgntframe(padapter, pmgntframe);
-}
-
-void issue_p2p_provision_request(struct adapter *padapter, u8 *pssid, u8 ussidlen, u8 *pdev_raddr)
-{
- unsigned char category = WLAN_CATEGORY_PUBLIC;
- u8 action = P2P_PUB_ACTION_ACTION;
- u8 dialogToken = 1;
- u8 oui_subtype = P2P_PROVISION_DISC_REQ;
- u8 wpsie[100] = { 0x00 };
- u8 wpsielen = 0;
- __be32 p2poui = cpu_to_be32(P2POUI);
- u32 p2pielen = 0;
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
-
- memcpy(pwlanhdr->addr1, pdev_raddr, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, pdev_raddr, ETH_ALEN);
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_ACTION);
-
- pframe += sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
-
- pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&p2poui, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &oui_subtype, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &dialogToken, &pattrib->pktlen);
-
- p2pielen = build_prov_disc_request_p2p_ie(pwdinfo, pframe, pssid, ussidlen, pdev_raddr);
-
- pframe += p2pielen;
- pattrib->pktlen += p2pielen;
-
- wpsielen = 0;
- /* WPS OUI */
- *(__be32 *)(wpsie) = cpu_to_be32(WPSOUI);
- wpsielen += 4;
-
- /* WPS version */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_VER1);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
- wpsielen += 2;
-
- /* Value: */
- wpsie[wpsielen++] = WPS_VERSION_1; /* Version 1.0 */
-
- /* Config Method */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_CONF_METHOD);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0002);
- wpsielen += 2;
-
- /* Value: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(pwdinfo->tx_prov_disc_info.wps_config_method_request);
- wpsielen += 2;
-
- pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, wpsielen, (unsigned char *)wpsie, &pattrib->pktlen);
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- dump_mgntframe(padapter, pmgntframe);
-}
-
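- /* Return 1 if peermacaddr matches an entry in the persistent group profile list, 0 otherwise. */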
-static u8 is_matched_in_profilelist(u8 *peermacaddr, struct profile_info *profileinfo)
-{
- u8 i, match_result = 0;
-
- for (i = 0; i < P2P_MAX_PERSISTENT_GROUP_NUM; i++, profileinfo++) {
- if (!memcmp(peermacaddr, profileinfo->peermac, ETH_ALEN)) {
- match_result = 1;
- break;
- }
- }
- return match_result;
-}
-
-void issue_probersp_p2p(struct adapter *padapter, unsigned char *da)
-{
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- unsigned char *mac;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- u16 beacon_interval = 100;
- u16 capInfo = 0;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- u8 wpsie[255] = { 0x00 };
- u32 wpsielen = 0, p2pielen = 0;
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- mac = myid(&padapter->eeprompriv);
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
- memcpy(pwlanhdr->addr1, da, ETH_ALEN);
- memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
-
- /* Use the device address for BSSID field. */
- memcpy(pwlanhdr->addr3, mac, ETH_ALEN);
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- SetFrameSubType(fctrl, WIFI_PROBERSP);
-
- pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen = pattrib->hdrlen;
- pframe += pattrib->hdrlen;
-
- /* timestamp will be inserted by hardware */
- pframe += 8;
- pattrib->pktlen += 8;
-
- /* beacon interval: 2 bytes */
- memcpy(pframe, (unsigned char *)&beacon_interval, 2);
- pframe += 2;
- pattrib->pktlen += 2;
-
- /* capability info: 2 bytes */
- /* ESS and IBSS bits must be 0 (defined in section 3.1.2.1.1 of the Wi-Fi Direct spec) */
- capInfo |= cap_ShortPremble;
- capInfo |= cap_ShortSlot;
-
- memcpy(pframe, (unsigned char *)&capInfo, 2);
- pframe += 2;
- pattrib->pktlen += 2;
-
- /* SSID */
- pframe = rtw_set_ie(pframe, _SSID_IE_, 7, pwdinfo->p2p_wildcard_ssid, &pattrib->pktlen);
-
- /* supported rates... */
- /* Use the OFDM rates in the P2P probe response frame. (6(B), 9(B), 12, 18, 24, 36, 48, 54) */
- pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, pwdinfo->support_rate, &pattrib->pktlen);
-
- /* DS parameter set */
- pframe = rtw_set_ie(pframe, _DSSET_IE_, 1, (unsigned char *)&pwdinfo->listen_channel, &pattrib->pktlen);
-
- /* Todo: WPS IE */
- /* Noted by Albert 20100907 */
- /* According to the WPS specification, all WPS attributes are presented in big endian. */
-
- wpsielen = 0;
- /* WPS OUI */
- *(__be32 *)(wpsie) = cpu_to_be32(WPSOUI);
- wpsielen += 4;
-
- /* WPS version */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_VER1);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
- wpsielen += 2;
-
- /* Value: */
- wpsie[wpsielen++] = WPS_VERSION_1; /* Version 1.0 */
-
- /* WiFi Simple Config State */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_SIMPLE_CONF_STATE);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
- wpsielen += 2;
-
- /* Value: */
- wpsie[wpsielen++] = WPS_WSC_STATE_NOT_CONFIG; /* Not Configured. */
-
- /* Response Type */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_RESP_TYPE);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
- wpsielen += 2;
-
- /* Value: */
- wpsie[wpsielen++] = WPS_RESPONSE_TYPE_8021X;
-
- /* UUID-E */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_UUID_E);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0010);
- wpsielen += 2;
-
- /* Value: */
- memcpy(wpsie + wpsielen, myid(&padapter->eeprompriv), ETH_ALEN);
- wpsielen += 0x10;
-
- /* Manufacturer */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_MANUFACTURER);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0007);
- wpsielen += 2;
-
- /* Value: */
- memcpy(wpsie + wpsielen, "Realtek", 7);
- wpsielen += 7;
-
- /* Model Name */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_MODEL_NAME);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0006);
- wpsielen += 2;
-
- /* Value: */
- memcpy(wpsie + wpsielen, "8188EU", 6);
- wpsielen += 6;
-
- /* Model Number */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_MODEL_NUMBER);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
- wpsielen += 2;
-
- /* Value: */
- wpsie[wpsielen++] = 0x31; /* character 1 */
-
- /* Serial Number */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_SERIAL_NUMBER);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(ETH_ALEN);
- wpsielen += 2;
-
- /* Value: */
- memcpy(wpsie + wpsielen, "123456", ETH_ALEN);
- wpsielen += ETH_ALEN;
-
- /* Primary Device Type */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_PRIMARY_DEV_TYPE);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0008);
- wpsielen += 2;
-
- /* Value: */
- /* Category ID */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
- wpsielen += 2;
-
- /* OUI */
- *(__be32 *)(wpsie + wpsielen) = cpu_to_be32(WPSOUI);
- wpsielen += 4;
-
- /* Sub Category ID */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
- wpsielen += 2;
-
- /* Device Name */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(pwdinfo->device_name_len);
- wpsielen += 2;
-
- /* Value: */
- if (pwdinfo->device_name_len) {
- memcpy(wpsie + wpsielen, pwdinfo->device_name, pwdinfo->device_name_len);
- wpsielen += pwdinfo->device_name_len;
- }
-
- /* Config Method */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_CONF_METHOD);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0002);
- wpsielen += 2;
-
- /* Value: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(pwdinfo->supported_wps_cm);
- wpsielen += 2;
-
- pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, wpsielen, (unsigned char *)wpsie, &pattrib->pktlen);
-
- p2pielen = build_probe_resp_p2p_ie(pwdinfo, pframe);
- pframe += p2pielen;
- pattrib->pktlen += p2pielen;
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- dump_mgntframe(padapter, pmgntframe);
-}
-
-inline void issue_probereq_p2p(struct adapter *padapter)
-{
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- unsigned char *mac;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- u8 wpsie[255] = { 0x00 }, p2pie[255] = { 0x00 };
- u16 wpsielen = 0, p2pielen = 0;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- mac = myid(&padapter->eeprompriv);
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
-
- if ((pwdinfo->p2p_info.scan_op_ch_only) || (pwdinfo->rx_invitereq_info.scan_op_ch_only)) {
- /* These two flags are set only in P2P client mode. */
- memcpy(pwlanhdr->addr1, pwdinfo->p2p_peer_interface_addr, ETH_ALEN);
- memcpy(pwlanhdr->addr3, pwdinfo->p2p_peer_interface_addr, ETH_ALEN);
- } else {
- /* broadcast probe request frame */
- eth_broadcast_addr(pwlanhdr->addr1);
- eth_broadcast_addr(pwlanhdr->addr3);
- }
-
- memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_PROBEREQ);
-
- pframe += sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
-
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ))
- pframe = rtw_set_ie(pframe, _SSID_IE_, pwdinfo->tx_prov_disc_info.ssid.SsidLength, pwdinfo->tx_prov_disc_info.ssid.Ssid, &pattrib->pktlen);
- else
- pframe = rtw_set_ie(pframe, _SSID_IE_, P2P_WILDCARD_SSID_LEN, pwdinfo->p2p_wildcard_ssid, &pattrib->pktlen);
-
- /* Use the OFDM rates in the P2P probe request frame. (6(B), 9(B), 12(B), 24(B), 36, 48, 54) */
- pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, pwdinfo->support_rate, &pattrib->pktlen);
-
- /* WPS IE */
- /* Noted by Albert 20110221 */
- /* According to the WPS specification, all WPS attributes are presented in big endian. */
-
- wpsielen = 0;
- /* WPS OUI */
- *(__be32 *)(wpsie) = cpu_to_be32(WPSOUI);
- wpsielen += 4;
-
- /* WPS version */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_VER1);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
- wpsielen += 2;
-
- /* Value: */
- wpsie[wpsielen++] = WPS_VERSION_1; /* Version 1.0 */
-
- if (!pmlmepriv->wps_probe_req_ie) {
- /* UUID-E */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_UUID_E);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0010);
- wpsielen += 2;
-
- /* Value: */
- memcpy(wpsie + wpsielen, myid(&padapter->eeprompriv), ETH_ALEN);
- wpsielen += 0x10;
-
- /* Config Method */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_CONF_METHOD);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0002);
- wpsielen += 2;
-
- /* Value: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(pwdinfo->supported_wps_cm);
- wpsielen += 2;
- }
-
- /* Device Name */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(pwdinfo->device_name_len);
- wpsielen += 2;
-
- /* Value: */
- memcpy(wpsie + wpsielen, pwdinfo->device_name, pwdinfo->device_name_len);
- wpsielen += pwdinfo->device_name_len;
-
- /* Primary Device Type */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_PRIMARY_DEV_TYPE);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0008);
- wpsielen += 2;
-
- /* Value: */
- /* Category ID */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_PDT_CID_RTK_WIDI);
- wpsielen += 2;
-
- /* OUI */
- *(__be32 *)(wpsie + wpsielen) = cpu_to_be32(WPSOUI);
- wpsielen += 4;
-
- /* Sub Category ID */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_PDT_SCID_RTK_DMP);
- wpsielen += 2;
-
- /* Device Password ID */
- /* Type: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_DEVICE_PWID);
- wpsielen += 2;
-
- /* Length: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0002);
- wpsielen += 2;
-
- /* Value: */
- *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_DPID_REGISTRAR_SPEC); /* Registrar-specified */
- wpsielen += 2;
-
- pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, wpsielen, (unsigned char *)wpsie, &pattrib->pktlen);
-
- /* P2P OUI */
- p2pielen = 0;
- p2pie[p2pielen++] = 0x50;
- p2pie[p2pielen++] = 0x6F;
- p2pie[p2pielen++] = 0x9A;
- p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
-
- /* Commented by Albert 20110221 */
- /* According to the P2P Specification, the probe request frame should contain 5 P2P attributes */
- /* 1. P2P Capability */
- /* 2. P2P Device ID if this probe request wants to find the specific P2P device */
- /* 3. Listen Channel */
- /* 4. Extended Listen Timing */
- /* 5. Operating Channel if this WiFi is working as the group owner now */
-
- /* P2P Capability */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
- p2pielen += 2;
-
- /* Value: */
- /* Device Capability Bitmap, 1 byte */
- p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
-
- /* Group Capability Bitmap, 1 byte */
- if (pwdinfo->persistent_supported)
- p2pie[p2pielen++] = P2P_GRPCAP_PERSISTENT_GROUP | DMP_P2P_GRPCAP_SUPPORT;
- else
- p2pie[p2pielen++] = DMP_P2P_GRPCAP_SUPPORT;
-
- /* Listen Channel */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_LISTEN_CH;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
- p2pielen += 2;
-
- /* Value: */
- /* Country String */
- p2pie[p2pielen++] = 'X';
- p2pie[p2pielen++] = 'X';
-
- /* The third byte should be set to 0x04. */
- /* Described in the "Operating Channel Attribute" section. */
- p2pie[p2pielen++] = 0x04;
-
- /* Operating Class */
- p2pie[p2pielen++] = 0x51; /* Copy from SD7 */
-
- /* Channel Number */
- p2pie[p2pielen++] = pwdinfo->listen_channel; /* listen channel */
-
- /* Extended Listen Timing */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_EX_LISTEN_TIMING;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0004);
- p2pielen += 2;
-
- /* Value: */
- /* Availability Period */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0xFFFF);
- p2pielen += 2;
-
- /* Availability Interval */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0xFFFF);
- p2pielen += 2;
-
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
- /* Operating Channel (if this WiFi is working as the group owner now) */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
- p2pielen += 2;
-
- /* Value: */
- /* Country String */
- p2pie[p2pielen++] = 'X';
- p2pie[p2pielen++] = 'X';
-
- /* The third byte should be set to 0x04. */
- /* Described in the "Operating Channel Attribute" section. */
- p2pie[p2pielen++] = 0x04;
-
- /* Operating Class */
- p2pie[p2pielen++] = 0x51; /* Copy from SD7 */
-
- /* Channel Number */
- p2pie[p2pielen++] = pwdinfo->operating_channel; /* operating channel number */
- }
-
- pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &pattrib->pktlen);
-
- if (pmlmepriv->wps_probe_req_ie) {
- /* WPS IE */
- memcpy(pframe, pmlmepriv->wps_probe_req_ie, pmlmepriv->wps_probe_req_ie_len);
- pattrib->pktlen += pmlmepriv->wps_probe_req_ie_len;
- pframe += pmlmepriv->wps_probe_req_ie_len;
- }
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- dump_mgntframe(padapter, pmgntframe);
-}
-
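- /* Drop retransmitted public action frames: if the retry bit is set and both the cached */
- /* sequence control and dialog token match, return _FAIL; otherwise cache them and return _SUCCESS. */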
-static s32 rtw_action_public_decache(struct recv_frame *recv_frame, u8 token)
-{
- struct adapter *adapter = recv_frame->adapter;
- struct mlme_ext_priv *mlmeext = &adapter->mlmeextpriv;
- u8 *frame = recv_frame->rx_data;
- u16 seq_ctrl = ((recv_frame->attrib.seq_num & 0xffff) << 4) |
- (recv_frame->attrib.frag_num & 0xf);
-
- if (GetRetry(frame)) {
- if ((seq_ctrl == mlmeext->action_public_rxseq) &&
- (token == mlmeext->action_public_dialog_token))
- return _FAIL;
- }
-
- mlmeext->action_public_rxseq = seq_ctrl;
- mlmeext->action_public_dialog_token = token;
- return _SUCCESS;
-}
-
-static unsigned int on_action_public_p2p(struct recv_frame *precv_frame)
-{
- u8 *pframe = precv_frame->rx_data;
- u8 *frame_body;
- u8 dialogToken = 0;
- struct adapter *padapter = precv_frame->adapter;
- uint len = precv_frame->len;
- u8 *p2p_ie;
- u32 p2p_ielen;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- u8 result = P2P_STATUS_SUCCESS;
- u8 empty_addr[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
-
- frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
-
- dialogToken = frame_body[7];
-
- if (rtw_action_public_decache(precv_frame, dialogToken) == _FAIL)
- return _FAIL;
-
- _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey);
- /* Do nothing if the driver doesn't enable the P2P function. */
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE))
- return _SUCCESS;
-
- len -= sizeof(struct ieee80211_hdr_3addr);
-
- switch (frame_body[6]) { /* OUI Subtype */
- case P2P_GO_NEGO_REQ:
- memset(&pwdinfo->groupid_info, 0x00, sizeof(struct group_id_info));
-
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_RX_PROVISION_DIS_REQ))
- rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
-
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_FAIL)) {
- /* Commented by Albert 20110526 */
- /* In this case, the previous negotiation failure state has not been reset yet. */
- _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
- /* Restore the previous p2p state */
- rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
- }
-
- /* Commented by Kurt 20110902 */
- /* Use an if statement to avoid duplicate prov disc req frames overwriting pre_p2p_state. */
- if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING))
- rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
-
- /* Commented by Kurt 20120113 */
- /* Get peer_dev_addr here if peer doesn't issue prov_disc frame. */
- if (!memcmp(pwdinfo->rx_prov_disc_info.peerDevAddr, empty_addr, ETH_ALEN))
- memcpy(pwdinfo->rx_prov_disc_info.peerDevAddr, GetAddr2Ptr(pframe), ETH_ALEN);
-
- result = process_p2p_group_negotation_req(pwdinfo, frame_body, len);
- issue_p2p_GO_response(padapter, GetAddr2Ptr(pframe), frame_body, len, result);
-
- /* Commented by Albert 20110718 */
- /* Whether negotiation is still in progress or has failed, the driver should set up the restore P2P state timer. */
- _set_timer(&pwdinfo->restore_p2p_state_timer, 5000);
- break;
- case P2P_GO_NEGO_RESP:
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING)) {
- /* Commented by Albert 20110425 */
- /* The restore timer is enabled when issuing the negotiation request frame in the rtw_p2p_connect function. */
- _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
- pwdinfo->nego_req_info.benable = false;
- result = process_p2p_group_negotation_resp(pwdinfo, frame_body, len);
- issue_p2p_GO_confirm(pwdinfo->padapter, GetAddr2Ptr(pframe), result);
- if (result == P2P_STATUS_SUCCESS) {
- if (rtw_p2p_role(pwdinfo) == P2P_ROLE_CLIENT) {
- pwdinfo->p2p_info.operation_ch[0] = pwdinfo->peer_operating_ch;
- pwdinfo->p2p_info.scan_op_ch_only = 1;
- _set_timer(&pwdinfo->reset_ch_sitesurvey2, P2P_RESET_SCAN_CH);
- }
- }
- /* Reset the dialog token for group negotiation frames. */
- pwdinfo->negotiation_dialog_token = 1;
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_FAIL))
- _set_timer(&pwdinfo->restore_p2p_state_timer, 5000);
- }
- break;
- case P2P_GO_NEGO_CONF:
- result = process_p2p_group_negotation_confirm(pwdinfo, frame_body, len);
- if (result == P2P_STATUS_SUCCESS) {
- if (rtw_p2p_role(pwdinfo) == P2P_ROLE_CLIENT) {
- pwdinfo->p2p_info.operation_ch[0] = pwdinfo->peer_operating_ch;
- pwdinfo->p2p_info.scan_op_ch_only = 1;
- _set_timer(&pwdinfo->reset_ch_sitesurvey2, P2P_RESET_SCAN_CH);
- }
- }
- break;
- case P2P_INVIT_REQ:
- /* Added by Albert 2010/10/05 */
- /* Received the P2P Invite Request frame. */
-
- p2p_ie = rtw_get_p2p_ie(frame_body + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, NULL, &p2p_ielen);
- if (p2p_ie) {
- /* Parse the necessary information from the P2P Invitation Request frame. */
- /* For example, the MAC address of the device sending this P2P Invitation Request frame. */
- u32 attr_contentlen = 0;
- u8 status_code = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
- struct group_id_info group_id;
- u8 invitation_flag = 0;
-
- rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_INVITATION_FLAGS, &invitation_flag, &attr_contentlen);
- if (attr_contentlen) {
- rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_BSSID, pwdinfo->p2p_peer_interface_addr, &attr_contentlen);
- /* Commented by Albert 20120510 */
- /* Copy to pwdinfo->p2p_peer_interface_addr */
- /* so that the WFD UI (or Sigma) can get the peer interface address with the following command: */
- /* #> iwpriv wlan0 p2p_get peer_ifa */
- /* After obtaining the peer interface address, Sigma can find the correct conf file for wpa_supplicant. */
-
- if (invitation_flag & P2P_INVITATION_FLAGS_PERSISTENT) {
- /* Re-invoke the persistent group. */
-
- memset(&group_id, 0x00, sizeof(struct group_id_info));
- rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, (u8 *)&group_id, &attr_contentlen);
- if (attr_contentlen) {
- if (!memcmp(group_id.go_device_addr, myid(&padapter->eeprompriv), ETH_ALEN)) {
- /* The p2p device sending this p2p invitation request wants this Wi-Fi device to be the persistent GO. */
- rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_GO);
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
- status_code = P2P_STATUS_SUCCESS;
- } else {
- /* The p2p device sending this p2p invitation request wants to be the persistent GO. */
- if (is_matched_in_profilelist(pwdinfo->p2p_peer_interface_addr, &pwdinfo->profileinfo[0])) {
- u8 operatingch_info[5] = { 0x00 };
- if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_OPERATING_CH, operatingch_info, &attr_contentlen)) {
- if (rtw_ch_set_search_ch(padapter->mlmeextpriv.channel_set, (u32)operatingch_info[4])) {
- /* The operating channel is acceptable for this device. */
- pwdinfo->rx_invitereq_info.operation_ch[0] = operatingch_info[4];
- pwdinfo->rx_invitereq_info.scan_op_ch_only = 1;
- _set_timer(&pwdinfo->reset_ch_sitesurvey, P2P_RESET_SCAN_CH);
- rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_MATCH);
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
- status_code = P2P_STATUS_SUCCESS;
- } else {
- /* The operating channel isn't supported by this device. */
- rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_DISMATCH);
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
- status_code = P2P_STATUS_FAIL_NO_COMMON_CH;
- _set_timer(&pwdinfo->restore_p2p_state_timer, 3000);
- }
- } else {
- /* Commented by Albert 20121130 */
- /* Intel uses a different P2P IE to store the operating channel information */
- /* Workaround for Intel WiDi 3.5 */
- rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_MATCH);
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
- status_code = P2P_STATUS_SUCCESS;
- }
- } else {
- rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_DISMATCH);
- status_code = P2P_STATUS_FAIL_UNKNOWN_P2PGROUP;
- }
- }
- } else {
- status_code = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
- }
- } else {
- /* Received the invitation to join a P2P group. */
-
- memset(&group_id, 0x00, sizeof(struct group_id_info));
- rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, (u8 *)&group_id, &attr_contentlen);
- if (attr_contentlen) {
- if (!memcmp(group_id.go_device_addr, myid(&padapter->eeprompriv), ETH_ALEN)) {
- /* In this case, the GO can't be myself. */
- rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_DISMATCH);
- status_code = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
- } else {
- /* The p2p device sending this p2p invitation request wants to join an existing P2P group */
- /* Commented by Albert 2012/06/28 */
- /* In this case, this Wi-Fi device should use the iwpriv command to get the peer device address. */
- /* The peer device address should be the destination address for the provisioning discovery request. */
- /* Then, this Wi-Fi device should use the iwpriv command to get the peer interface address. */
- /* The peer interface address should be used as the WPS MAC address */
- memcpy(pwdinfo->p2p_peer_device_addr, group_id.go_device_addr, ETH_ALEN);
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
- rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_JOIN);
- status_code = P2P_STATUS_SUCCESS;
- }
- } else {
- status_code = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
- }
- }
- } else {
- status_code = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
- }
-
- pwdinfo->inviteresp_info.token = frame_body[7];
- issue_p2p_invitation_response(padapter, GetAddr2Ptr(pframe), pwdinfo->inviteresp_info.token, status_code);
- }
- break;
- case P2P_INVIT_RESP: {
- u8 attr_content = 0x00;
- u32 attr_contentlen = 0;
-
- _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
- p2p_ie = rtw_get_p2p_ie(frame_body + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, NULL, &p2p_ielen);
- if (p2p_ie) {
- rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_STATUS, &attr_content, &attr_contentlen);
-
- if (attr_contentlen == 1) {
- pwdinfo->invitereq_info.benable = false;
-
- if (attr_content == P2P_STATUS_SUCCESS) {
- if (!memcmp(pwdinfo->invitereq_info.go_bssid, myid(&padapter->eeprompriv), ETH_ALEN)) {
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
- } else {
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
- }
- rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INVITE_RESP_OK);
- } else {
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
- rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INVITE_RESP_FAIL);
- }
- } else {
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
- rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INVITE_RESP_FAIL);
- }
- } else {
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
- rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INVITE_RESP_FAIL);
- }
-
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_RX_INVITE_RESP_FAIL))
- _set_timer(&pwdinfo->restore_p2p_state_timer, 5000);
- break;
- }
- case P2P_DEVDISC_REQ:
- process_p2p_devdisc_req(pwdinfo, pframe, len);
- break;
- case P2P_DEVDISC_RESP:
- process_p2p_devdisc_resp(pwdinfo, pframe, len);
- break;
- case P2P_PROVISION_DISC_REQ:
- process_p2p_provdisc_req(pwdinfo, pframe, len);
- memcpy(pwdinfo->rx_prov_disc_info.peerDevAddr, GetAddr2Ptr(pframe), ETH_ALEN);
-
- /* 20110902 Kurt */
- /* The following check avoids handling a duplicate prov disc req, which would otherwise overwrite pre_p2p_state. */
- if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_RX_PROVISION_DIS_REQ))
- rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
-
- rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_PROVISION_DIS_REQ);
- _set_timer(&pwdinfo->restore_p2p_state_timer, P2P_PROVISION_TIMEOUT);
- break;
- case P2P_PROVISION_DISC_RESP:
- /* Commented by Albert 20110707 */
- /* Should we check the pwdinfo->tx_prov_disc_info.bsent flag here?? */
- /* Commented by Albert 20110426 */
- /* The restore timer is enabled when issuing the provisioning request frame in the rtw_p2p_prov_disc function. */
- _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
- rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_PROVISION_DIS_RSP);
- process_p2p_provdisc_resp(pwdinfo, pframe);
- _set_timer(&pwdinfo->restore_p2p_state_timer, P2P_PROVISION_TIMEOUT);
- break;
- }
-
- return _SUCCESS;
-}
-
-static void on_action_public(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data;
- u8 *frame_body = (u8 *)&mgmt->u;
-
- /* All members of the action enum start with action_code. */
- if (mgmt->u.action.u.s1g.action_code == WLAN_PUB_ACTION_VENDOR_SPECIFIC) {
- if (!memcmp(frame_body + 2, P2P_OUI, 4))
- on_action_public_p2p(precv_frame);
- } else {
- rtw_action_public_decache(precv_frame, frame_body[2]);
- }
-}
-
-static void OnAction_p2p(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- u8 *frame_body;
- u8 OUI_Subtype;
- u8 *pframe = precv_frame->rx_data;
- uint len = precv_frame->len;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
-
- if (be32_to_cpu(*((__be32 *)(frame_body + 1))) != P2POUI)
- return;
-
- len -= sizeof(struct ieee80211_hdr_3addr);
- OUI_Subtype = frame_body[5];
-
- if (OUI_Subtype == P2P_PRESENCE_REQUEST)
- process_p2p_presence_req(pwdinfo, pframe, len);
-}
-
-static void OnAction(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data;
-
- if (!ether_addr_equal(myid(&padapter->eeprompriv), mgmt->da))
- return;
-
- switch (mgmt->u.action.category) {
- case WLAN_CATEGORY_BACK:
- OnAction_back(padapter, precv_frame);
- break;
- case WLAN_CATEGORY_PUBLIC:
- on_action_public(padapter, precv_frame);
- break;
- case RTW_WLAN_CATEGORY_P2P:
- OnAction_p2p(padapter, precv_frame);
- break;
- }
-}
-
-struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv)
-{
- struct xmit_frame *pmgntframe;
- struct xmit_buf *pxmitbuf;
-
- pmgntframe = rtw_alloc_xmitframe(pxmitpriv);
- if (!pmgntframe)
- return NULL;
-
- pxmitbuf = rtw_alloc_xmitbuf_ext(pxmitpriv);
- if (!pxmitbuf) {
- rtw_free_xmitframe(pxmitpriv, pmgntframe);
- return NULL;
- }
- pmgntframe->frame_tag = MGNT_FRAMETAG;
- pmgntframe->pxmitbuf = pxmitbuf;
- pmgntframe->buf_addr = pxmitbuf->pbuf;
- pxmitbuf->priv_data = pmgntframe;
- return pmgntframe;
-}
-
-void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- mlme_handler mlme_sta_tbl[] = {
- OnAssocReq,
- OnAssocRsp,
- OnAssocReq,
- OnAssocRsp,
- OnProbeReq,
- OnProbeRsp,
- NULL,
- NULL,
- OnBeacon,
- NULL,
- OnDisassoc,
- OnAuthClient,
- OnDeAuth,
- OnAction,
- };
- int index;
- mlme_handler fct;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
- struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, hdr->addr2);
-
- if (!ieee80211_is_mgmt(hdr->frame_control))
- return;
-
- /* Only accept frames whose RA (addr1) is my address or the broadcast address. */
- if (memcmp(hdr->addr1, myid(&padapter->eeprompriv), ETH_ALEN) &&
- !is_broadcast_ether_addr(hdr->addr1))
- return;
-
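- /* The 4-bit management frame subtype (frame_control bits 4-7) indexes mlme_sta_tbl. */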
- index = (le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE) >> 4;
- if (index >= ARRAY_SIZE(mlme_sta_tbl))
- return;
- fct = mlme_sta_tbl[index];
-
- if (psta) {
- if (ieee80211_has_retry(hdr->frame_control)) {
- if (precv_frame->attrib.seq_num == psta->RxMgmtFrameSeqNum)
- /* drop the duplicate management frame */
- return;
- }
- psta->RxMgmtFrameSeqNum = precv_frame->attrib.seq_num;
- }
-
- if (ieee80211_is_auth(hdr->frame_control)) {
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
- fct = OnAuth;
- else
- fct = OnAuthClient;
- }
-
- if (fct)
- fct(padapter, precv_frame);
-}
-
-/****************************************************************************
-
-Following are some TX functions for WiFi MLME
-
-*****************************************************************************/
-
-void update_mgnt_tx_rate(struct adapter *padapter, u8 rate)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- pmlmeext->tx_rate = rate;
-}
-
-void update_mgntframe_attrib(struct adapter *padapter, struct pkt_attrib *pattrib)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- memset((u8 *)(pattrib), 0, sizeof(struct pkt_attrib));
-
- pattrib->hdrlen = 24;
- pattrib->nr_frags = 1;
- pattrib->priority = 7;
- pattrib->mac_id = 0;
- pattrib->qsel = 0x12;
-
- pattrib->pktlen = 0;
-
- if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
- pattrib->raid = 6;/* b mode */
- else
- pattrib->raid = 5;/* a/g mode */
-
- pattrib->encrypt = _NO_PRIVACY_;
- pattrib->bswenc = false;
-
- pattrib->qos_en = false;
- pattrib->ht_en = false;
- pattrib->bwmode = HT_CHANNEL_WIDTH_20;
- pattrib->ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- pattrib->sgi = false;
-
- pattrib->seqnum = pmlmeext->mgnt_seq;
-
- pattrib->retry_ctrl = true;
-}
-
-void dump_mgntframe(struct adapter *padapter, struct xmit_frame *pmgntframe)
-{
- rtl8188eu_mgnt_xmit(padapter, pmgntframe);
-}
-
-s32 dump_mgntframe_and_wait(struct adapter *padapter, struct xmit_frame *pmgntframe, int timeout_ms)
-{
- s32 ret = _FAIL;
- struct xmit_buf *pxmitbuf = pmgntframe->pxmitbuf;
- struct submit_ctx sctx;
-
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
- return ret;
-
- rtw_sctx_init(&sctx, timeout_ms);
- pxmitbuf->sctx = &sctx;
-
- ret = rtl8188eu_mgnt_xmit(padapter, pmgntframe);
-
- if (ret == _SUCCESS)
- ret = rtw_sctx_wait(&sctx);
-
- return ret;
-}
-
-s32 dump_mgntframe_and_wait_ack(struct adapter *padapter, struct xmit_frame *pmgntframe)
-{
- s32 ret = _FAIL;
- u32 timeout_ms = 500;/* 500ms */
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- mutex_lock(&pxmitpriv->ack_tx_mutex);
- pxmitpriv->ack_tx = true;
-
- pmgntframe->ack_report = 1;
- if (rtl8188eu_mgnt_xmit(padapter, pmgntframe) == _SUCCESS) {
- ret = rtw_ack_tx_wait(pxmitpriv, timeout_ms);
- }
-
- pxmitpriv->ack_tx = false;
- mutex_unlock(&pxmitpriv->ack_tx_mutex);
-
- return ret;
-}
-
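- /* hidden_ssid_mode 1: strip the SSID bytes (zero-length SSID IE); mode 2: keep the IE length but zero out the SSID bytes. */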
-static int update_hidden_ssid(u8 *ies, u32 ies_len, u8 hidden_ssid_mode)
-{
- u8 *ssid_ie;
- int ssid_len_ori;
- int len_diff = 0;
-
- ssid_ie = rtw_get_ie(ies, WLAN_EID_SSID, &ssid_len_ori, ies_len);
-
- if (ssid_ie && ssid_len_ori > 0) {
- switch (hidden_ssid_mode) {
- case 1: {
- u8 *next_ie = ssid_ie + 2 + ssid_len_ori;
- u32 remain_len = 0;
-
- remain_len = ies_len - (next_ie - ies);
-
- ssid_ie[1] = 0;
- memcpy(ssid_ie + 2, next_ie, remain_len);
- len_diff -= ssid_len_ori;
-
- break;
- }
- case 2:
- memset(&ssid_ie[2], 0, ssid_len_ori);
- break;
- default:
- break;
- }
- }
-
- return len_diff;
-}
-
-void issue_beacon(struct adapter *padapter, int timeout_ms)
-{
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- unsigned int rate_len;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- return;
- spin_lock_bh(&pmlmepriv->bcn_update_lock);
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
- pattrib->qsel = 0x10;
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
-
- eth_broadcast_addr(pwlanhdr->addr1);
- memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_my_bssid(cur_network), ETH_ALEN);
-
- SetSeqNum(pwlanhdr, 0/*pmlmeext->mgnt_seq*/);
- /* pmlmeext->mgnt_seq++; */
- SetFrameSubType(pframe, WIFI_BEACON);
-
- pframe += sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
-
- if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) {
- /* for P2P : Primary Device Type & Device Name */
- u32 wpsielen = 0, insert_len = 0;
- u8 *wpsie = NULL;
- wpsie = rtw_get_wps_ie(cur_network->IEs + _FIXED_IE_LENGTH_, cur_network->IELength - _FIXED_IE_LENGTH_, NULL, &wpsielen);
-
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO) && wpsie && wpsielen > 0) {
- uint wps_offset, remainder_ielen;
- u8 *premainder_ie, *pframe_wscie;
-
- wps_offset = (uint)(wpsie - cur_network->IEs);
- premainder_ie = wpsie + wpsielen;
- remainder_ielen = cur_network->IELength - wps_offset - wpsielen;
- pframe_wscie = pframe + wps_offset;
- memcpy(pframe, cur_network->IEs, wps_offset + wpsielen);
- pframe += (wps_offset + wpsielen);
- pattrib->pktlen += (wps_offset + wpsielen);
-
- /* pframe is now at the end of the WSC IE; insert Primary Device Type & Device Name */
- /* Primary Device Type */
- /* Type: */
- *(__be16 *)(pframe + insert_len) = cpu_to_be16(WPS_ATTR_PRIMARY_DEV_TYPE);
- insert_len += 2;
-
- /* Length: */
- *(__be16 *)(pframe + insert_len) = cpu_to_be16(0x0008);
- insert_len += 2;
-
- /* Value: */
- /* Category ID */
- *(__be16 *)(pframe + insert_len) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
- insert_len += 2;
-
- /* OUI */
- *(__be32 *)(pframe + insert_len) = cpu_to_be32(WPSOUI);
- insert_len += 4;
-
- /* Sub Category ID */
- *(__be16 *)(pframe + insert_len) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
- insert_len += 2;
-
- /* Device Name */
- /* Type: */
- *(__be16 *)(pframe + insert_len) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
- insert_len += 2;
-
- /* Length: */
- *(__be16 *)(pframe + insert_len) = cpu_to_be16(pwdinfo->device_name_len);
- insert_len += 2;
-
- /* Value: */
- memcpy(pframe + insert_len, pwdinfo->device_name, pwdinfo->device_name_len);
- insert_len += pwdinfo->device_name_len;
-
- /* update wsc ie length */
- *(pframe_wscie + 1) = (wpsielen - 2) + insert_len;
-
- /* move pframe to the end */
- pframe += insert_len;
- pattrib->pktlen += insert_len;
-
- /* copy remainder_ie to pframe */
- memcpy(pframe, premainder_ie, remainder_ielen);
- pframe += remainder_ielen;
- pattrib->pktlen += remainder_ielen;
- } else {
- int len_diff;
- memcpy(pframe, cur_network->IEs, cur_network->IELength);
- len_diff = update_hidden_ssid(
- pframe + _BEACON_IE_OFFSET_
- , cur_network->IELength - _BEACON_IE_OFFSET_
- , pmlmeinfo->hidden_ssid_mode
- );
- pframe += (cur_network->IELength + len_diff);
- pattrib->pktlen += (cur_network->IELength + len_diff);
- }
-
- {
- u8 *wps_ie;
- uint wps_ielen;
- u8 sr = 0;
- wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr + TXDESC_OFFSET + sizeof(struct ieee80211_hdr_3addr) + _BEACON_IE_OFFSET_,
- pattrib->pktlen - sizeof(struct ieee80211_hdr_3addr) - _BEACON_IE_OFFSET_, NULL, &wps_ielen);
- if (wps_ie && wps_ielen > 0)
- rtw_get_wps_attr_content(wps_ie, wps_ielen, WPS_ATTR_SELECTED_REGISTRAR, (u8 *)(&sr), NULL);
- if (sr != 0)
- set_fwstate(pmlmepriv, WIFI_UNDER_WPS);
- else
- _clr_fwstate_(pmlmepriv, WIFI_UNDER_WPS);
- }
-
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
- u32 len;
- len = build_beacon_p2p_ie(pwdinfo, pframe);
-
- pframe += len;
- pattrib->pktlen += len;
- }
-
- goto _issue_bcn;
- }
-
- /* below for ad-hoc mode */
-
- /* timestamp will be inserted by hardware */
- pframe += 8;
- pattrib->pktlen += 8;
-
- /* beacon interval: 2 bytes */
-
- memcpy(pframe, (unsigned char *)(rtw_get_beacon_interval_from_ie(cur_network->IEs)), 2);
-
- pframe += 2;
- pattrib->pktlen += 2;
-
- /* capability info: 2 bytes */
-
- memcpy(pframe, (unsigned char *)(rtw_get_capability_from_ie(cur_network->IEs)), 2);
-
- pframe += 2;
- pattrib->pktlen += 2;
-
- /* SSID */
- pframe = rtw_set_ie(pframe, _SSID_IE_, cur_network->Ssid.SsidLength, cur_network->Ssid.Ssid, &pattrib->pktlen);
-
- /* supported rates... */
- rate_len = rtw_get_rateset_len(cur_network->SupportedRates);
- pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, ((rate_len > 8) ? 8 : rate_len), cur_network->SupportedRates, &pattrib->pktlen);
-
- /* DS parameter set */
- pframe = rtw_set_ie(pframe, _DSSET_IE_, 1, (unsigned char *)&cur_network->Configuration.DSConfig, &pattrib->pktlen);
-
- {
- u8 erpinfo = 0;
- u32 ATIMWindow;
- /* IBSS Parameter Set... */
- ATIMWindow = 0;
- pframe = rtw_set_ie(pframe, _IBSS_PARA_IE_, 2, (unsigned char *)(&ATIMWindow), &pattrib->pktlen);
-
- /* ERP IE */
- pframe = rtw_set_ie(pframe, _ERPINFO_IE_, 1, &erpinfo, &pattrib->pktlen);
- }
-
- /* EXTENDED SUPPORTED RATES */
- if (rate_len > 8)
- pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (rate_len - 8), (cur_network->SupportedRates + 8), &pattrib->pktlen);
- /* todo:HT for adhoc */
-_issue_bcn:
-
- pmlmepriv->update_bcn = false;
-
- spin_unlock_bh(&pmlmepriv->bcn_update_lock);
-
- if ((pattrib->pktlen + TXDESC_SIZE) > 512)
- return;
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- if (timeout_ms > 0)
- dump_mgntframe_and_wait(padapter, pmgntframe, timeout_ms);
- else
- dump_mgntframe(padapter, pmgntframe);
-}
-
-void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p_probereq)
-{
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- unsigned char *mac, *bssid;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- u8 *pwps_ie;
- uint wps_ielen;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
- unsigned int rate_len;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- mac = myid(&padapter->eeprompriv);
- bssid = cur_network->MacAddress;
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
- memcpy(pwlanhdr->addr1, da, ETH_ALEN);
- memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
- memcpy(pwlanhdr->addr3, bssid, ETH_ALEN);
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- SetFrameSubType(fctrl, WIFI_PROBERSP);
-
- pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen = pattrib->hdrlen;
- pframe += pattrib->hdrlen;
-
- if (cur_network->IELength > MAX_IE_SZ)
- return;
-
- if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) {
- pwps_ie = rtw_get_wps_ie(cur_network->IEs + _FIXED_IE_LENGTH_, cur_network->IELength - _FIXED_IE_LENGTH_, NULL, &wps_ielen);
-
- /* insert & update wps_probe_resp_ie */
- if (pmlmepriv->wps_probe_resp_ie && pwps_ie && wps_ielen > 0) {
- uint wps_offset, remainder_ielen;
- u8 *premainder_ie;
-
- wps_offset = (uint)(pwps_ie - cur_network->IEs);
-
- premainder_ie = pwps_ie + wps_ielen;
-
- remainder_ielen = cur_network->IELength - wps_offset - wps_ielen;
-
- memcpy(pframe, cur_network->IEs, wps_offset);
- pframe += wps_offset;
- pattrib->pktlen += wps_offset;
-
- wps_ielen = (uint)pmlmepriv->wps_probe_resp_ie[1];/* to get ie data len */
- if ((wps_offset + wps_ielen + 2) <= MAX_IE_SZ) {
- memcpy(pframe, pmlmepriv->wps_probe_resp_ie, wps_ielen + 2);
- pframe += wps_ielen + 2;
- pattrib->pktlen += wps_ielen + 2;
- }
-
- if ((wps_offset + wps_ielen + 2 + remainder_ielen) <= MAX_IE_SZ) {
- memcpy(pframe, premainder_ie, remainder_ielen);
- pframe += remainder_ielen;
- pattrib->pktlen += remainder_ielen;
- }
- } else {
- memcpy(pframe, cur_network->IEs, cur_network->IELength);
- pframe += cur_network->IELength;
- pattrib->pktlen += cur_network->IELength;
- }
- } else {
- /* timestamp will be inserted by hardware */
- pframe += 8;
- pattrib->pktlen += 8;
-
- /* beacon interval: 2 bytes */
-
- memcpy(pframe, (unsigned char *)(rtw_get_beacon_interval_from_ie(cur_network->IEs)), 2);
-
- pframe += 2;
- pattrib->pktlen += 2;
-
- /* capability info: 2 bytes */
-
- memcpy(pframe, (unsigned char *)(rtw_get_capability_from_ie(cur_network->IEs)), 2);
-
- pframe += 2;
- pattrib->pktlen += 2;
-
- /* below for ad-hoc mode */
-
- /* SSID */
- pframe = rtw_set_ie(pframe, _SSID_IE_, cur_network->Ssid.SsidLength, cur_network->Ssid.Ssid, &pattrib->pktlen);
-
- /* supported rates... */
- rate_len = rtw_get_rateset_len(cur_network->SupportedRates);
- pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, ((rate_len > 8) ? 8 : rate_len), cur_network->SupportedRates, &pattrib->pktlen);
-
- /* DS parameter set */
- pframe = rtw_set_ie(pframe, _DSSET_IE_, 1, (unsigned char *)&cur_network->Configuration.DSConfig, &pattrib->pktlen);
-
- if ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) {
- u8 erpinfo = 0;
- u32 ATIMWindow;
- /* IBSS Parameter Set... */
- /* ATIMWindow = cur->Configuration.ATIMWindow; */
- ATIMWindow = 0;
- pframe = rtw_set_ie(pframe, _IBSS_PARA_IE_, 2, (unsigned char *)(&ATIMWindow), &pattrib->pktlen);
-
- /* ERP IE */
- pframe = rtw_set_ie(pframe, _ERPINFO_IE_, 1, &erpinfo, &pattrib->pktlen);
- }
-
- /* EXTENDED SUPPORTED RATES */
- if (rate_len > 8)
- pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (rate_len - 8), (cur_network->SupportedRates + 8), &pattrib->pktlen);
- /* todo:HT for adhoc */
- }
-
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO) && is_valid_p2p_probereq) {
- u32 len;
- len = build_probe_resp_p2p_ie(pwdinfo, pframe);
-
- pframe += len;
- pattrib->pktlen += len;
- }
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- dump_mgntframe(padapter, pmgntframe);
-}
-
-static int _issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da, int wait_ack)
-{
- int ret = _FAIL;
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- unsigned char *mac;
- unsigned char bssrate[NumRates];
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- int bssrate_len = 0;
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- goto exit;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- mac = myid(&padapter->eeprompriv);
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
-
- if (da) {
- /* unicast probe request frame */
- memcpy(pwlanhdr->addr1, da, ETH_ALEN);
- memcpy(pwlanhdr->addr3, da, ETH_ALEN);
- } else {
- /* broadcast probe request frame */
- eth_broadcast_addr(pwlanhdr->addr1);
- eth_broadcast_addr(pwlanhdr->addr3);
- }
-
- memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_PROBEREQ);
-
- pframe += sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
-
- if (pssid)
- pframe = rtw_set_ie(pframe, _SSID_IE_, pssid->SsidLength, pssid->Ssid, &pattrib->pktlen);
- else
- pframe = rtw_set_ie(pframe, _SSID_IE_, 0, NULL, &pattrib->pktlen);
-
- get_rate_set(padapter, bssrate, &bssrate_len);
-
- if (bssrate_len > 8) {
- pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, bssrate, &pattrib->pktlen);
- pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, bssrate_len - 8, bssrate + 8, &pattrib->pktlen);
- } else {
- pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, bssrate_len, bssrate, &pattrib->pktlen);
- }
-
- /* add wps_ie for WPS 2.0 */
- if (pmlmepriv->wps_probe_req_ie_len > 0 && pmlmepriv->wps_probe_req_ie) {
- memcpy(pframe, pmlmepriv->wps_probe_req_ie, pmlmepriv->wps_probe_req_ie_len);
- pframe += pmlmepriv->wps_probe_req_ie_len;
- pattrib->pktlen += pmlmepriv->wps_probe_req_ie_len;
- }
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- if (wait_ack) {
- ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe);
- } else {
- dump_mgntframe(padapter, pmgntframe);
- ret = _SUCCESS;
- }
-
-exit:
- return ret;
-}
-
-inline void issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da)
-{
- _issue_probereq(padapter, pssid, da, false);
-}
-
-void issue_probereq_ex(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da)
-{
- int i;
-
- for (i = 0; i < 3; i++) {
- if (_issue_probereq(padapter, pssid, da, true) == _FAIL)
- msleep(1);
- else
- break;
- }
-}
-
- /* if psta == NULL, we are acting as a station (client)... */
-void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short status)
-{
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- unsigned int val32;
- u16 val16;
- __le16 le_val16;
- int use_shared_key = 0;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_AUTH);
-
- pframe += sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
-
- if (psta) {/* for AP mode */
- memcpy(pwlanhdr->addr1, psta->hwaddr, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, myid(&padapter->eeprompriv), ETH_ALEN);
-
- /* setting auth algo number */
- val16 = (u16)psta->authalg;
-
- if (status != _STATS_SUCCESSFUL_)
- val16 = 0;
-
- if (val16) {
- le_val16 = cpu_to_le16(val16);
- use_shared_key = 1;
- } else {
- le_val16 = 0;
- }
-
- pframe = rtw_set_fixed_ie(pframe, _AUTH_ALGM_NUM_, (unsigned char *)&le_val16, &pattrib->pktlen);
-
- /* setting auth seq number */
- val16 = (u16)psta->auth_seq;
- le_val16 = cpu_to_le16(val16);
- pframe = rtw_set_fixed_ie(pframe, _AUTH_SEQ_NUM_, (unsigned char *)&le_val16, &pattrib->pktlen);
-
- /* setting status code... */
- val16 = status;
- le_val16 = cpu_to_le16(val16);
- pframe = rtw_set_fixed_ie(pframe, _STATUS_CODE_, (unsigned char *)&le_val16, &pattrib->pktlen);
-
- /* append the challenge text if needed... */
- if ((psta->auth_seq == 2) && (psta->state & WIFI_FW_AUTH_STATE) && (use_shared_key == 1))
- pframe = rtw_set_ie(pframe, _CHLGETXT_IE_, 128, psta->chg_txt, &pattrib->pktlen);
- } else {
- __le32 le_tmp32;
- __le16 le_tmp16;
- memcpy(pwlanhdr->addr1, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
-
- /* setting auth algo number */
- val16 = (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) ? 1 : 0;/* 0:OPEN System, 1:Shared key */
- if (val16)
- use_shared_key = 1;
-
- /* setting IV for auth seq #3 */
- if ((pmlmeinfo->auth_seq == 3) && (pmlmeinfo->state & WIFI_FW_AUTH_STATE) && (use_shared_key == 1)) {
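- /* Build the 4-byte WEP IV field: the running IV counter in the low bits, key index in bits 30-31 (the Key ID octet). */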
- val32 = ((pmlmeinfo->iv++) | (pmlmeinfo->key_index << 30));
- le_tmp32 = cpu_to_le32(val32);
- pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&le_tmp32, &pattrib->pktlen);
-
- pattrib->iv_len = 4;
- }
-
- le_tmp16 = cpu_to_le16(val16);
- pframe = rtw_set_fixed_ie(pframe, _AUTH_ALGM_NUM_, (unsigned char *)&le_tmp16, &pattrib->pktlen);
-
- /* setting auth seq number */
- val16 = pmlmeinfo->auth_seq;
- le_tmp16 = cpu_to_le16(val16);
- pframe = rtw_set_fixed_ie(pframe, _AUTH_SEQ_NUM_, (unsigned char *)&le_tmp16, &pattrib->pktlen);
-
- /* setting status code... */
- le_tmp16 = cpu_to_le16(status);
- pframe = rtw_set_fixed_ie(pframe, _STATUS_CODE_, (unsigned char *)&le_tmp16, &pattrib->pktlen);
-
- /* then check whether the challenge text needs to be sent... */
- if ((pmlmeinfo->auth_seq == 3) && (pmlmeinfo->state & WIFI_FW_AUTH_STATE) && (use_shared_key == 1)) {
- pframe = rtw_set_ie(pframe, _CHLGETXT_IE_, 128, pmlmeinfo->chg_txt, &pattrib->pktlen);
-
- SetPrivacy(fctrl);
-
- pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
-
- pattrib->encrypt = _WEP40_;
-
- pattrib->icv_len = 4;
-
- pattrib->pktlen += pattrib->icv_len;
- }
- }
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- rtw_wep_encrypt(padapter, pmgntframe);
- dump_mgntframe(padapter, pmgntframe);
-}
-
-void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_info *pstat, int pkt_type)
-{
- struct xmit_frame *pmgntframe;
- struct ieee80211_hdr *pwlanhdr;
- struct pkt_attrib *pattrib;
- unsigned char *pbuf, *pframe;
- unsigned short val;
- __le16 *fctrl;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
- u8 *ie = pnetwork->IEs;
- __le16 lestatus, leval;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
-
- memcpy((void *)GetAddr1Ptr(pwlanhdr), pstat->hwaddr, ETH_ALEN);
- memcpy((void *)GetAddr2Ptr(pwlanhdr), myid(&padapter->eeprompriv), ETH_ALEN);
- memcpy((void *)GetAddr3Ptr(pwlanhdr), get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- if ((pkt_type == WIFI_ASSOCRSP) || (pkt_type == WIFI_REASSOCRSP))
- SetFrameSubType(pwlanhdr, pkt_type);
- else
- return;
-
- pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen += pattrib->hdrlen;
- pframe += pattrib->hdrlen;
-
- /* capability */
- val = *(unsigned short *)rtw_get_capability_from_ie(ie);
-
- pframe = rtw_set_fixed_ie(pframe, _CAPABILITY_, (unsigned char *)&val, &pattrib->pktlen);
-
- lestatus = cpu_to_le16(status);
- pframe = rtw_set_fixed_ie(pframe, _STATUS_CODE_, (unsigned char *)&lestatus, &pattrib->pktlen);
-
- leval = cpu_to_le16(pstat->aid | BIT(14) | BIT(15));
- pframe = rtw_set_fixed_ie(pframe, _ASOC_ID_, (unsigned char *)&leval, &pattrib->pktlen);
-
- if (pstat->bssratelen <= 8) {
- pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, pstat->bssratelen, pstat->bssrateset, &pattrib->pktlen);
- } else {
- pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, pstat->bssrateset, &pattrib->pktlen);
- pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, pstat->bssratelen - 8, pstat->bssrateset + 8, &pattrib->pktlen);
- }
-
- if ((pstat->flags & WLAN_STA_HT) && (pmlmepriv->htpriv.ht_option)) {
- uint ie_len = 0;
-
- /* FILL HT CAP INFO IE */
- pbuf = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_CAPABILITY_IE_, &ie_len, (pnetwork->IELength - _BEACON_IE_OFFSET_));
- if (pbuf && ie_len > 0) {
- memcpy(pframe, pbuf, ie_len + 2);
- pframe += (ie_len + 2);
- pattrib->pktlen += (ie_len + 2);
- }
-
- /* FILL HT ADD INFO IE */
- pbuf = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_ADD_INFO_IE_, &ie_len, (pnetwork->IELength - _BEACON_IE_OFFSET_));
- if (pbuf && ie_len > 0) {
- memcpy(pframe, pbuf, ie_len + 2);
- pframe += (ie_len + 2);
- pattrib->pktlen += (ie_len + 2);
- }
- }
-
- /* FILL WMM IE */
- if ((pstat->flags & WLAN_STA_WME) && (pmlmepriv->qospriv.qos_option)) {
- uint ie_len = 0;
- unsigned char WMM_PARA_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
-
- for (pbuf = ie + _BEACON_IE_OFFSET_;; pbuf += (ie_len + 2)) {
- pbuf = rtw_get_ie(pbuf, _VENDOR_SPECIFIC_IE_, &ie_len, (pnetwork->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2)));
- if (pbuf && !memcmp(pbuf + 2, WMM_PARA_IE, 6)) {
- memcpy(pframe, pbuf, ie_len + 2);
- pframe += (ie_len + 2);
- pattrib->pktlen += (ie_len + 2);
- break;
- }
-
- if (!pbuf || ie_len == 0)
- break;
- }
- }
-
- if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
- pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6, REALTEK_96B_IE, &pattrib->pktlen);
-
- /* add WPS IE for WPS 2.0 */
- if (pmlmepriv->wps_assoc_resp_ie && pmlmepriv->wps_assoc_resp_ie_len > 0) {
- memcpy(pframe, pmlmepriv->wps_assoc_resp_ie, pmlmepriv->wps_assoc_resp_ie_len);
-
- pframe += pmlmepriv->wps_assoc_resp_ie_len;
- pattrib->pktlen += pmlmepriv->wps_assoc_resp_ie_len;
- }
-
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO) && (pstat->is_p2p_device)) {
- u32 len;
-
- len = build_assoc_resp_p2p_ie(pwdinfo, pframe, pstat->p2p_status_code);
-
- pframe += len;
- pattrib->pktlen += len;
- }
- pattrib->last_txcmdsz = pattrib->pktlen;
- dump_mgntframe(padapter, pmgntframe);
-}
-
-void issue_assocreq(struct adapter *padapter)
-{
- int ret = _FAIL;
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe, *p;
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- __le16 le_tmp;
- unsigned int i, j, ie_len, index = 0;
- unsigned char bssrate[NumRates], sta_bssrate[NumRates];
- struct ndis_802_11_var_ie *pIE;
- struct registry_priv *pregpriv = &padapter->registrypriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- int bssrate_len = 0, sta_bssrate_len = 0;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- u8 p2pie[255] = { 0x00 };
- u16 p2pielen = 0;
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- goto exit;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
- memcpy(pwlanhdr->addr1, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_ASSOCREQ);
-
- pframe += sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
-
- /* caps */
-
- memcpy(pframe, rtw_get_capability_from_ie(pmlmeinfo->network.IEs), 2);
-
- pframe += 2;
- pattrib->pktlen += 2;
-
- /* listen interval */
- /* todo: listen interval for power saving */
- le_tmp = cpu_to_le16(3);
- memcpy(pframe, (unsigned char *)&le_tmp, 2);
- pframe += 2;
- pattrib->pktlen += 2;
-
- /* SSID */
- pframe = rtw_set_ie(pframe, _SSID_IE_, pmlmeinfo->network.Ssid.SsidLength, pmlmeinfo->network.Ssid.Ssid, &pattrib->pktlen);
-
- /* supported rate & extended supported rate */
-
- /* Check if the AP's supported rates are also supported by STA. */
- get_rate_set(padapter, sta_bssrate, &sta_bssrate_len);
-
- if (pmlmeext->cur_channel == 14)/* for Japan, channel 14 can only use B mode (CCK) */
- sta_bssrate_len = 4;
-
- for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
- if (pmlmeinfo->network.SupportedRates[i] == 0)
- break;
-
- /* Check if the AP's supported rates are also supported by STA. */
- for (j = 0; j < sta_bssrate_len; j++) {
- /* Avoid the proprietary data rate (22Mbps) of Handlink WSG-4000 AP */
- if ((pmlmeinfo->network.SupportedRates[i] | IEEE80211_BASIC_RATE_MASK)
- == (sta_bssrate[j] | IEEE80211_BASIC_RATE_MASK))
- break;
- }
-
- if (j != sta_bssrate_len)
- /* the rate is supported by STA */
- bssrate[index++] = pmlmeinfo->network.SupportedRates[i];
- }
-
- bssrate_len = index;
-
- if (bssrate_len == 0) {
- rtw_free_xmitbuf(pxmitpriv, pmgntframe->pxmitbuf);
- rtw_free_xmitframe(pxmitpriv, pmgntframe);
- goto exit; /* don't connect to the AP if there is no common supported rate */
- }
-
- if (bssrate_len > 8) {
- pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, bssrate, &pattrib->pktlen);
- pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, bssrate_len - 8, bssrate + 8, &pattrib->pktlen);
- } else {
- pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, bssrate_len, bssrate, &pattrib->pktlen);
- }
-
- /* RSN */
- p = rtw_get_ie((pmlmeinfo->network.IEs + sizeof(struct ndis_802_11_fixed_ie)), _RSN_IE_2_, &ie_len, (pmlmeinfo->network.IELength - sizeof(struct ndis_802_11_fixed_ie)));
- if (p)
- pframe = rtw_set_ie(pframe, _RSN_IE_2_, ie_len, p + 2, &pattrib->pktlen);
-
- /* HT caps */
- if (padapter->mlmepriv.htpriv.ht_option) {
- p = rtw_get_ie((pmlmeinfo->network.IEs + sizeof(struct ndis_802_11_fixed_ie)), _HT_CAPABILITY_IE_, &ie_len, (pmlmeinfo->network.IELength - sizeof(struct ndis_802_11_fixed_ie)));
- if (p && !is_ap_in_tkip(padapter)) {
- memcpy(&pmlmeinfo->HT_caps, p + 2, sizeof(struct HT_caps_element));
-
- /* disable 40 MHz support while cbw40_enable == 0 */
- if (pregpriv->cbw40_enable == 0)
- pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info &= cpu_to_le16(~(BIT(6) | BIT(1)));
- else
- pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(BIT(1));
-
- /* todo: disable SM power save mode */
- pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(0x000c);
-
- if (pregpriv->rx_stbc)
- pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(0x0100);/* RX STBC One spatial stream */
- memcpy(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_rate_1R, 16);
-
- pframe = rtw_set_ie(pframe, _HT_CAPABILITY_IE_, ie_len, (u8 *)(&pmlmeinfo->HT_caps), &pattrib->pktlen);
- }
- }
-
- /* vendor specific IE, such as WPA, WMM, WPS */
- for (i = sizeof(struct ndis_802_11_fixed_ie); i < pmlmeinfo->network.IELength;) {
- pIE = (struct ndis_802_11_var_ie *)(pmlmeinfo->network.IEs + i);
-
- switch (pIE->ElementID) {
- case _VENDOR_SPECIFIC_IE_:
- if ((!memcmp(pIE->data, RTW_WPA_OUI, 4)) ||
- (!memcmp(pIE->data, WMM_OUI, 4)) ||
- (!memcmp(pIE->data, WPS_OUI, 4))) {
- if (!padapter->registrypriv.wifi_spec) {
- /* Commented by Kurt 20110629 */
- /* With some older APs, the WPS handshake */
- /* would fail if we append vendor extension information to the AP */
- if (!memcmp(pIE->data, WPS_OUI, 4))
- pIE->Length = 14;
- }
- pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, pIE->Length, pIE->data, &pattrib->pktlen);
- }
- break;
- default:
- break;
- }
- i += (pIE->Length + 2);
- }
-
- if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
- pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6, REALTEK_96B_IE, &pattrib->pktlen);
-
- if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) && !rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE)) {
- /* Should add the P2P IE in the association request frame. */
- /* P2P OUI */
-
- p2pielen = 0;
- p2pie[p2pielen++] = 0x50;
- p2pie[p2pielen++] = 0x6F;
- p2pie[p2pielen++] = 0x9A;
- p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
-
- /* Commented by Albert 20101109 */
- /* According to the P2P Specification, the association request frame should contain 3 P2P attributes */
- /* 1. P2P Capability */
- /* 2. Extended Listen Timing */
- /* 3. Device Info */
- /* Commented by Albert 20110516 */
- /* 4. P2P Interface */
-
- /* P2P Capability */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
- p2pielen += 2;
-
- /* Value: */
- /* Device Capability Bitmap, 1 byte */
- p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
-
- /* Group Capability Bitmap, 1 byte */
- if (pwdinfo->persistent_supported)
- p2pie[p2pielen++] = P2P_GRPCAP_PERSISTENT_GROUP | DMP_P2P_GRPCAP_SUPPORT;
- else
- p2pie[p2pielen++] = DMP_P2P_GRPCAP_SUPPORT;
-
- /* Extended Listen Timing */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_EX_LISTEN_TIMING;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0004);
- p2pielen += 2;
-
- /* Value: */
- /* Availability Period */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0xFFFF);
- p2pielen += 2;
-
- /* Availability Interval */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0xFFFF);
- p2pielen += 2;
-
- /* Device Info */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
-
- /* Length: */
- /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) + Primary Device Type (8bytes) */
- /* + NumofSecondDevType (1byte) + WPS Device Name ID field (2bytes) + WPS Device Name Len field (2bytes) */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len);
- p2pielen += 2;
-
- /* Value: */
- /* P2P Device Address */
- memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
- p2pielen += ETH_ALEN;
-
- /* Config Method */
- /* This field should be big endian, as noted in the P2P specification. */
- if ((pwdinfo->ui_got_wps_info == P2P_GOT_WPSINFO_PEER_DISPLAY_PIN) ||
- (pwdinfo->ui_got_wps_info == P2P_GOT_WPSINFO_SELF_DISPLAY_PIN))
- *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_CONFIG_METHOD_DISPLAY);
- else
- *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_CONFIG_METHOD_PBC);
-
- p2pielen += 2;
-
- /* Primary Device Type */
- /* Category ID */
- *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
- p2pielen += 2;
-
- /* OUI */
- *(__be32 *)(p2pie + p2pielen) = cpu_to_be32(WPSOUI);
- p2pielen += 4;
-
- /* Sub Category ID */
- *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
- p2pielen += 2;
-
- /* Number of Secondary Device Types */
- p2pie[p2pielen++] = 0x00; /* No Secondary Device Type List */
-
- /* Device Name */
- /* Type: */
- *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
- p2pielen += 2;
-
- /* Length: */
- *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len);
- p2pielen += 2;
-
- /* Value: */
- memcpy(p2pie + p2pielen, pwdinfo->device_name, pwdinfo->device_name_len);
- p2pielen += pwdinfo->device_name_len;
-
- /* P2P Interface */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_INTERFACE;
-
- /* Length: */
- *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x000D);
- p2pielen += 2;
-
- /* Value: */
- memcpy(p2pie + p2pielen, pwdinfo->device_addr, ETH_ALEN); /* P2P Device Address */
- p2pielen += ETH_ALEN;
-
- p2pie[p2pielen++] = 1; /* P2P Interface Address Count */
-
- memcpy(p2pie + p2pielen, pwdinfo->device_addr, ETH_ALEN); /* P2P Interface Address List */
- p2pielen += ETH_ALEN;
-
- pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &pattrib->pktlen);
- }
-
- pattrib->last_txcmdsz = pattrib->pktlen;
- dump_mgntframe(padapter, pmgntframe);
-
- ret = _SUCCESS;
-
-exit:
- if (ret == _SUCCESS)
- rtw_buf_update(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len, (u8 *)pwlanhdr, pattrib->pktlen);
- else
- kfree(pmlmepriv->assoc_req);
-}
-
- /* when wait_ack is true, this function should be called in process context */
-static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int wait_ack)
-{
- int ret = _FAIL;
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- struct xmit_priv *pxmitpriv;
- struct mlme_ext_priv *pmlmeext;
- struct mlme_ext_info *pmlmeinfo;
-
- if (!padapter)
- goto exit;
-
- pxmitpriv = &padapter->xmitpriv;
- pmlmeext = &padapter->mlmeextpriv;
- pmlmeinfo = &pmlmeext->mlmext_info;
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- goto exit;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
- pattrib->retry_ctrl = false;
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
-
- if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)
- SetFrDs(fctrl);
- else if ((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE)
- SetToDs(fctrl);
-
- if (power_mode)
- SetPwrMgt(fctrl);
-
- memcpy(pwlanhdr->addr1, da, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_DATA_NULL);
-
- pframe += sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- if (wait_ack) {
- ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe);
- } else {
- dump_mgntframe(padapter, pmgntframe);
- ret = _SUCCESS;
- }
-
-exit:
- return ret;
-}
-
- /* when wait_ms > 0, this function should be called in process context */
-/* da == NULL for station mode */
-int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int try_cnt, int wait_ms)
-{
- int ret;
- int i = 0;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- /* da == NULL: assume it's a null data frame from STA to AP */
- if (!da)
- da = get_my_bssid(&pmlmeinfo->network);
-
- do {
- ret = _issue_nulldata(padapter, da, power_mode, wait_ms > 0);
-
- i++;
-
- if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
- break;
-
- if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
- msleep(wait_ms);
- } while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
-
- if (ret != _FAIL) {
- ret = _SUCCESS;
- goto exit;
- }
-exit:
- return ret;
-}
-
- /* when wait_ack is true, this function should be called in process context */
-static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int wait_ack)
-{
- int ret = _FAIL;
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- unsigned short *qc;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- goto exit;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
-
- pattrib->hdrlen += 2;
- pattrib->qos_en = true;
- pattrib->eosp = 1;
- pattrib->ack_policy = 0;
- pattrib->mdata = 0;
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
-
- if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)
- SetFrDs(fctrl);
- else if ((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE)
- SetToDs(fctrl);
-
- qc = (unsigned short *)(pframe + pattrib->hdrlen - 2);
-
- SetPriority(qc, tid);
-
- SetEOSP(qc, pattrib->eosp);
-
- SetAckpolicy(qc, pattrib->ack_policy);
-
- memcpy(pwlanhdr->addr1, da, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_QOS_DATA_NULL);
-
- pframe += sizeof(struct ieee80211_qos_hdr);
- pattrib->pktlen = sizeof(struct ieee80211_qos_hdr);
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- if (wait_ack) {
- ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe);
- } else {
- dump_mgntframe(padapter, pmgntframe);
- ret = _SUCCESS;
- }
-
-exit:
- return ret;
-}
-
- /* when wait_ms > 0, this function should be called in process context */
-/* da == NULL for station mode */
-int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int try_cnt, int wait_ms)
-{
- int ret;
- int i = 0;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- /* da == NULL: assume it's a null data frame from STA to AP */
- if (!da)
- da = get_my_bssid(&pmlmeinfo->network);
-
- do {
- ret = _issue_qos_nulldata(padapter, da, tid, wait_ms > 0);
-
- i++;
-
- if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
- break;
-
- if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
- msleep(wait_ms);
- } while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
-
- if (ret != _FAIL) {
- ret = _SUCCESS;
- goto exit;
- }
-exit:
- return ret;
-}
-
-static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned short reason, u8 wait_ack)
-{
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- int ret = _FAIL;
- __le16 le_tmp;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- if (!(rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) && (pwdinfo->rx_invitereq_info.scan_op_ch_only)) {
- _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey);
- _set_timer(&pwdinfo->reset_ch_sitesurvey, 10);
- }
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- goto exit;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
- pattrib->retry_ctrl = false;
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
-
- memcpy(pwlanhdr->addr1, da, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_DEAUTH);
-
- pframe += sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
-
- le_tmp = cpu_to_le16(reason);
- pframe = rtw_set_fixed_ie(pframe, _RSON_CODE_, (unsigned char *)&le_tmp, &pattrib->pktlen);
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- if (wait_ack) {
- ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe);
- } else {
- dump_mgntframe(padapter, pmgntframe);
- ret = _SUCCESS;
- }
-
-exit:
- return ret;
-}
-
-int issue_deauth(struct adapter *padapter, unsigned char *da, unsigned short reason)
-{
- return _issue_deauth(padapter, da, reason, false);
-}
-
-int issue_deauth_ex(struct adapter *padapter, u8 *da, unsigned short reason, int try_cnt,
- int wait_ms)
-{
- int ret;
- int i = 0;
-
- do {
- ret = _issue_deauth(padapter, da, reason, wait_ms > 0);
-
- i++;
-
- if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
- break;
-
- if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
- msleep(wait_ms);
- } while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
-
- if (ret != _FAIL) {
- ret = _SUCCESS;
- goto exit;
- }
-exit:
- return ret;
-}
-
-void issue_action_BA(struct adapter *padapter, unsigned char *raddr, u8 action,
- u16 status, struct ieee80211_mgmt *mgmt_req)
-{
- u16 start_seq;
- u16 BA_starting_seqctrl = 0;
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct sta_info *psta;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct registry_priv *pregpriv = &padapter->registrypriv;
- struct ieee80211_mgmt *mgmt;
- u16 capab, params;
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- mgmt = (struct ieee80211_mgmt *)(pmgntframe->buf_addr + TXDESC_OFFSET);
-
- mgmt->frame_control = cpu_to_le16(IEEE80211_STYPE_ACTION | IEEE80211_FTYPE_MGMT);
-
- memcpy(mgmt->da, raddr, ETH_ALEN);
- memcpy(mgmt->sa, myid(&padapter->eeprompriv), ETH_ALEN);
- memcpy(mgmt->bssid, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
-
- mgmt->seq_ctrl = cpu_to_le16(pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
-
- mgmt->u.action.category = WLAN_CATEGORY_BACK;
-
- switch (action) {
- case WLAN_ACTION_ADDBA_REQ:
- mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;
- do {
- pmlmeinfo->dialogToken++;
- } while (pmlmeinfo->dialogToken == 0);
- mgmt->u.action.u.addba_req.dialog_token = pmlmeinfo->dialogToken;
-
- /* immediate block ack policy & buffer size of 64 */
- capab = u16_encode_bits(64, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK);
- capab |= u16_encode_bits(1, IEEE80211_ADDBA_PARAM_POLICY_MASK);
- capab |= u16_encode_bits(status, IEEE80211_ADDBA_PARAM_TID_MASK);
- mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);
-
- mgmt->u.action.u.addba_req.timeout = cpu_to_le16(5000); /* 5000 TUs (~5 s) */
-
- psta = rtw_get_stainfo(pstapriv, raddr);
- if (psta) {
- start_seq = (psta->sta_xmitpriv.txseq_tid[status & 0x07] & 0xfff) + 1;
-
- psta->BA_starting_seqctrl[status & 0x07] = start_seq;
-
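- /* Sequence Control field: fragment number in bits 0-3, starting sequence number in bits 4-15. */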
- BA_starting_seqctrl = start_seq << 4;
- }
- mgmt->u.action.u.addba_req.start_seq_num = cpu_to_le16(BA_starting_seqctrl);
-
- pattrib->pktlen = offsetofend(struct ieee80211_mgmt,
- u.action.u.addba_req.start_seq_num);
- break;
- case WLAN_ACTION_ADDBA_RESP:
- mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP;
- mgmt->u.action.u.addba_resp.dialog_token = mgmt_req->u.action.u.addba_req.dialog_token;
- mgmt->u.action.u.addba_resp.status = cpu_to_le16(status);
- capab = le16_to_cpu(mgmt_req->u.action.u.addba_req.capab) & 0x3f;
- capab |= u16_encode_bits(64, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK);
- capab |= u16_encode_bits(pregpriv->ampdu_amsdu, IEEE80211_ADDBA_PARAM_AMSDU_MASK);
- mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab);
- mgmt->u.action.u.addba_resp.timeout = mgmt_req->u.action.u.addba_req.timeout;
- pattrib->pktlen = offsetofend(struct ieee80211_mgmt, u.action.u.addba_resp.timeout);
- break;
- case WLAN_ACTION_DELBA:
- mgmt->u.action.u.delba.action_code = WLAN_ACTION_DELBA;
- params = u16_encode_bits((status & 0x1), IEEE80211_DELBA_PARAM_INITIATOR_MASK);
- params |= u16_encode_bits((status >> 1) & 0xF, IEEE80211_DELBA_PARAM_TID_MASK);
- mgmt->u.action.u.delba.params = cpu_to_le16(params);
- mgmt->u.action.u.delba.reason_code = cpu_to_le16(WLAN_STATUS_REQUEST_DECLINED);
- pattrib->pktlen = offsetofend(struct ieee80211_mgmt, u.action.u.delba.reason_code);
- break;
- default:
- break;
- }
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- dump_mgntframe(padapter, pmgntframe);
-}
-
-static void issue_action_BSSCoexistPacket(struct adapter *padapter)
-{
- struct list_head *plist, *phead;
- unsigned char category, action;
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- struct wlan_network *pnetwork = NULL;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct __queue *queue = &pmlmepriv->scanned_queue;
- u8 InfoContent[16] = {0};
- u8 ICS[8][15];
- if ((pmlmepriv->num_FortyMHzIntolerant == 0) || (pmlmepriv->num_sta_no_ht == 0))
- return;
-
- if (pmlmeinfo->bwmode_updated)
- return;
-
- category = WLAN_CATEGORY_PUBLIC;
- action = ACT_PUBLIC_BSSCOEXIST;
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
-
- memcpy(pwlanhdr->addr1, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_ACTION);
-
- pframe += sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
-
- pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
-
- /* */
- if (pmlmepriv->num_FortyMHzIntolerant > 0) {
- u8 iedata = 0;
-
- iedata |= BIT(2);/* 20 MHz BSS Width Request */
-
- pframe = rtw_set_ie(pframe, EID_BSSCoexistence, 1, &iedata, &pattrib->pktlen);
- }
-
- /* */
- memset(ICS, 0, sizeof(ICS));
- if (pmlmepriv->num_sta_no_ht > 0) {
- int i;
-
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
-
- phead = get_list_head(queue);
- plist = phead->next;
-
- while (phead != plist) {
- int len;
- u8 *p;
- struct wlan_bssid_ex *pbss_network;
-
- pnetwork = container_of(plist, struct wlan_network, list);
-
- plist = plist->next;
-
- pbss_network = (struct wlan_bssid_ex *)&pnetwork->network;
-
- p = rtw_get_ie(pbss_network->IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pbss_network->IELength - _FIXED_IE_LENGTH_);
- if (!p || len == 0) { /* non-HT */
- if ((pbss_network->Configuration.DSConfig <= 0) || (pbss_network->Configuration.DSConfig > 14))
- continue;
-
- ICS[0][pbss_network->Configuration.DSConfig] = 1;
-
- if (ICS[0][0] == 0)
- ICS[0][0] = 1;
- }
- }
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
-
- for (i = 0; i < 8; i++) {
- if (ICS[i][0] == 1) {
- int j, k = 0;
-
- InfoContent[k] = i;
- /* SET_BSS_INTOLERANT_ELE_REG_CLASS(InfoContent, i); */
- k++;
-
- for (j = 1; j <= 14; j++) {
- if (ICS[i][j] == 1) {
- if (k < 16) {
- InfoContent[k] = j; /* channel number */
- /* SET_BSS_INTOLERANT_ELE_CHANNEL(InfoContent+k, j); */
- k++;
- }
- }
- }
-
- pframe = rtw_set_ie(pframe, EID_BSSIntolerantChlReport, k, InfoContent, &pattrib->pktlen);
- }
- }
- }
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- dump_mgntframe(padapter, pmgntframe);
-}
-
-unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr)
-{
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct sta_info *psta = NULL;
- /* struct recv_reorder_ctrl *preorder_ctrl; */
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- u16 tid;
-
- if ((pmlmeinfo->state & 0x03) != WIFI_FW_AP_STATE)
- if (!(pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS))
- return _SUCCESS;
-
- psta = rtw_get_stainfo(pstapriv, addr);
- if (!psta)
- return _SUCCESS;
-
- if (initiator == 0) { /* recipient */
- for (tid = 0; tid < MAXTID; tid++) {
- if (psta->recvreorder_ctrl[tid].enable) {
- issue_action_BA(padapter, addr, WLAN_ACTION_DELBA,
- (((tid << 1) | initiator) & 0x1F), NULL);
- psta->recvreorder_ctrl[tid].enable = false;
- psta->recvreorder_ctrl[tid].indicate_seq = 0xffff;
- }
- }
- } else if (initiator == 1) { /* originator */
- for (tid = 0; tid < MAXTID; tid++) {
- if (psta->htpriv.agg_enable_bitmap & BIT(tid)) {
- issue_action_BA(padapter, addr, WLAN_ACTION_DELBA,
- (((tid << 1) | initiator) & 0x1F), NULL);
- psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
- psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
- }
- }
- }
-
- return _SUCCESS;
-}
-
-unsigned int send_beacon(struct adapter *padapter)
-{
- bool bxmitok = false;
- int issue = 0;
- int poll = 0;
-
- clear_beacon_valid_bit(padapter);
-
- do {
- issue_beacon(padapter, 100);
- issue++;
- do {
- yield();
- bxmitok = get_beacon_valid_bit(padapter);
- poll++;
- } while ((poll % 10) != 0 && !bxmitok && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
- } while (!bxmitok && issue < 100 && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
-
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped || !bxmitok)
- return _FAIL;
-
- return _SUCCESS;
-}
-
-bool get_beacon_valid_bit(struct adapter *adapter)
-{
- int res;
- u8 reg;
-
- res = rtw_read8(adapter, REG_TDECTRL + 2, &reg);
- if (res)
- return false;
-
- /* BIT(16) of REG_TDECTRL = BIT(0) of REG_TDECTRL+2 */
- return BIT(0) & reg;
-}
-
-void clear_beacon_valid_bit(struct adapter *adapter)
-{
- int res;
- u8 reg;
-
- res = rtw_read8(adapter, REG_TDECTRL + 2, &reg);
- if (res)
- return;
-
- /* BIT(16) of REG_TDECTRL = BIT(0) of REG_TDECTRL+2, write 1 to clear (cleared by sw) */
- rtw_write8(adapter, REG_TDECTRL + 2, reg | BIT(0));
-}
-
-void rtw_resume_tx_beacon(struct adapter *adapt)
-{
- struct hal_data_8188e *haldata = &adapt->haldata;
-
- /* 2010.03.01. Marked by tynli. No need to call workitem because we record the value */
- /* which should be read from register to a global variable. */
-
- rtw_write8(adapt, REG_FWHW_TXQ_CTRL + 2, (haldata->RegFwHwTxQCtrl) | BIT(6));
- haldata->RegFwHwTxQCtrl |= BIT(6);
- rtw_write8(adapt, REG_TBTT_PROHIBIT + 1, 0xff);
- haldata->RegReg542 |= BIT(0);
- rtw_write8(adapt, REG_TBTT_PROHIBIT + 2, haldata->RegReg542);
-}
-
-void rtw_stop_tx_beacon(struct adapter *adapt)
-{
- struct hal_data_8188e *haldata = &adapt->haldata;
-
- /* 2010.03.01. Marked by tynli. No need to call workitem because we record the value */
- /* which should be read from register to a global variable. */
-
- rtw_write8(adapt, REG_FWHW_TXQ_CTRL + 2, (haldata->RegFwHwTxQCtrl) & (~BIT(6)));
- haldata->RegFwHwTxQCtrl &= (~BIT(6));
- rtw_write8(adapt, REG_TBTT_PROHIBIT + 1, 0x64);
- haldata->RegReg542 &= ~(BIT(0));
- rtw_write8(adapt, REG_TBTT_PROHIBIT + 2, haldata->RegReg542);
-
- /* todo: CheckFwRsvdPageContent(Adapter); 2010.06.23. Added by tynli. */
-}
-
-static void rtw_set_opmode(struct adapter *adapter, u8 mode)
-{
- u8 val8;
- int res;
-
- /* disable Port0 TSF update */
- res = rtw_read8(adapter, REG_BCN_CTRL, &val8);
- if (res)
- return;
-
- rtw_write8(adapter, REG_BCN_CTRL, val8 | BIT(4));
-
- /* set net_type */
- res = rtw_read8(adapter, MSR, &val8);
- if (res)
- return;
-
- val8 &= 0x0c;
- val8 |= mode;
- rtw_write8(adapter, MSR, val8);
-
- if ((mode == _HW_STATE_STATION_) || (mode == _HW_STATE_NOLINK_)) {
- rtw_stop_tx_beacon(adapter);
-
- rtw_write8(adapter, REG_BCN_CTRL, 0x19);/* disable atim wnd */
- } else if (mode == _HW_STATE_ADHOC_) {
- rtw_resume_tx_beacon(adapter);
- rtw_write8(adapter, REG_BCN_CTRL, 0x1a);
- } else if (mode == _HW_STATE_AP_) {
- rtw_resume_tx_beacon(adapter);
-
- rtw_write8(adapter, REG_BCN_CTRL, 0x12);
-
- /* Set RCR */
-  rtw_write32(adapter, REG_RCR, 0x7000208e);/* CBSSID_DATA must be set to 0, reject ICV_ERR packets */
- /* enable to rx data frame */
- rtw_write16(adapter, REG_RXFLTMAP2, 0xFFFF);
- /* enable to rx ps-poll */
- rtw_write16(adapter, REG_RXFLTMAP1, 0x0400);
-
- /* Beacon Control related register for first time */
- rtw_write8(adapter, REG_BCNDMATIM, 0x02); /* 2ms */
-
- rtw_write8(adapter, REG_ATIMWND, 0x0a); /* 10ms */
- rtw_write16(adapter, REG_BCNTCFG, 0x00);
- rtw_write16(adapter, REG_TBTT_PROHIBIT, 0xff04);
- rtw_write16(adapter, REG_TSFTR_SYN_OFFSET, 0x7fff);/* +32767 (~32ms) */
-
- /* reset TSF */
- rtw_write8(adapter, REG_DUAL_TSF_RST, BIT(0));
-
-  /* BIT(3) - If set to 0, hw will clear bcnq when tx beacon ok/fail or port 0 */
- res = rtw_read8(adapter, REG_MBID_NUM, &val8);
- if (res)
- return;
-
- rtw_write8(adapter, REG_MBID_NUM, val8 | BIT(3) | BIT(4));
-
- /* enable BCN0 Function for if1 */
- /* don't enable update TSF0 for if1 (due to TSF update when beacon/probe rsp are received) */
- rtw_write8(adapter, REG_BCN_CTRL, (DIS_TSF_UDT0_NORMAL_CHIP | EN_BCN_FUNCTION | BIT(1)));
-
- /* dis BCN1 ATIM WND if if2 is station */
- res = rtw_read8(adapter, REG_BCN_CTRL_1, &val8);
- if (res)
- return;
-
- rtw_write8(adapter, REG_BCN_CTRL_1, val8 | BIT(0));
- }
-}
-
-/****************************************************************************
-
-Following are some utility functions for WiFi MLME
-
-*****************************************************************************/
-
-static void rtw_set_initial_gain(struct adapter *adapter, u8 gain)
-{
- struct hal_data_8188e *haldata = &adapter->haldata;
- struct odm_dm_struct *odmpriv = &haldata->odmpriv;
- struct rtw_dig *digtable = &odmpriv->DM_DigTable;
-
- if (gain == 0xff) {
- /* restore rx gain */
- ODM_Write_DIG(odmpriv, digtable->BackupIGValue);
- } else {
- digtable->BackupIGValue = digtable->CurIGValue;
- ODM_Write_DIG(odmpriv, gain);
- }
-}
-
-void rtw_mlme_under_site_survey(struct adapter *adapter)
-{
- /* config RCR to receive different BSSID & not to receive data frame */
-
- int res;
- u8 reg;
- u32 v;
-
- res = rtw_read32(adapter, REG_RCR, &v);
- if (res)
- return;
-
- v &= ~(RCR_CBSSID_BCN);
- rtw_write32(adapter, REG_RCR, v);
- /* reject all data frame */
- rtw_write16(adapter, REG_RXFLTMAP2, 0x00);
-
- /* disable update TSF */
- res = rtw_read8(adapter, REG_BCN_CTRL, &reg);
- if (res)
- return;
-
- rtw_write8(adapter, REG_BCN_CTRL, reg | BIT(4));
-}
-
-void rtw_mlme_site_survey_done(struct adapter *adapter)
-{
- struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- u32 reg32;
- int res;
- u8 reg;
-
- if ((r8188eu_is_client_associated_to_ap(adapter)) ||
- ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE)) {
- /* enable to rx data frame */
- rtw_write16(adapter, REG_RXFLTMAP2, 0xFFFF);
-
- /* enable update TSF */
- res = rtw_read8(adapter, REG_BCN_CTRL, &reg);
- if (res)
- return;
-
- rtw_write8(adapter, REG_BCN_CTRL, reg & (~BIT(4)));
- } else if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) {
- rtw_write16(adapter, REG_RXFLTMAP2, 0xFFFF);
- /* enable update TSF */
- res = rtw_read8(adapter, REG_BCN_CTRL, &reg);
- if (res)
- return;
-
- rtw_write8(adapter, REG_BCN_CTRL, reg & (~BIT(4)));
- }
-
- res = rtw_read32(adapter, REG_RCR, &reg32);
- if (res)
- return;
-
- rtw_write32(adapter, REG_RCR, reg32 | RCR_CBSSID_BCN);
-}
-
-void site_survey(struct adapter *padapter)
-{
- unsigned char survey_channel = 0;
- enum rt_scan_type ScanType = SCAN_PASSIVE;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- if ((pwdinfo->rx_invitereq_info.scan_op_ch_only) || (pwdinfo->p2p_info.scan_op_ch_only)) {
- if (pwdinfo->rx_invitereq_info.scan_op_ch_only) {
- survey_channel = pwdinfo->rx_invitereq_info.operation_ch[pmlmeext->sitesurvey_res.channel_idx];
- } else {
- survey_channel = pwdinfo->p2p_info.operation_ch[pmlmeext->sitesurvey_res.channel_idx];
- }
- ScanType = SCAN_ACTIVE;
- } else if (rtw_p2p_findphase_ex_is_social(pwdinfo)) {
- /* Commented by Albert 2011/06/03 */
-  /* The driver is in the find phase; it should go through the social channels. */
- int ch_set_idx;
- survey_channel = pwdinfo->social_chan[pmlmeext->sitesurvey_res.channel_idx];
- ch_set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, survey_channel);
- if (ch_set_idx >= 0)
- ScanType = pmlmeext->channel_set[ch_set_idx].ScanType;
- else
- ScanType = SCAN_ACTIVE;
- } else {
- struct rtw_ieee80211_channel *ch;
- if (pmlmeext->sitesurvey_res.channel_idx < pmlmeext->sitesurvey_res.ch_num) {
- ch = &pmlmeext->sitesurvey_res.ch[pmlmeext->sitesurvey_res.channel_idx];
- survey_channel = ch->hw_value;
- ScanType = (ch->flags & RTW_IEEE80211_CHAN_PASSIVE_SCAN) ? SCAN_PASSIVE : SCAN_ACTIVE;
- }
- }
-
- if (survey_channel != 0) {
- if (pmlmeext->sitesurvey_res.channel_idx == 0)
- set_channel_bwmode(padapter, survey_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
- else
- SelectChannel(padapter, survey_channel);
-
- if (ScanType == SCAN_ACTIVE) { /* obey the channel plan setting... */
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_SCAN) ||
- rtw_p2p_chk_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH)) {
- issue_probereq_p2p(padapter);
- issue_probereq_p2p(padapter);
- issue_probereq_p2p(padapter);
- } else {
- int i;
- for (i = 0; i < RTW_SSID_SCAN_AMOUNT; i++) {
- if (pmlmeext->sitesurvey_res.ssid[i].SsidLength) {
- /* todo: to issue two probe req??? */
- issue_probereq(padapter, &pmlmeext->sitesurvey_res.ssid[i], NULL);
- /* msleep(SURVEY_TO>>1); */
- issue_probereq(padapter, &pmlmeext->sitesurvey_res.ssid[i], NULL);
- }
- }
-
- if (pmlmeext->sitesurvey_res.scan_mode == SCAN_ACTIVE) {
- /* todo: to issue two probe req??? */
- issue_probereq(padapter, NULL, NULL);
- /* msleep(SURVEY_TO>>1); */
- issue_probereq(padapter, NULL, NULL);
- }
- }
- }
-
- set_survey_timer(pmlmeext, pmlmeext->chan_scan_time);
- } else {
- /* channel number is 0 or this channel is not valid. */
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_SCAN) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH)) {
- if ((pwdinfo->rx_invitereq_info.scan_op_ch_only) || (pwdinfo->p2p_info.scan_op_ch_only)) {
-     /* Set the find_phase_state_exchange_cnt to P2P_FINDPHASE_EX_CNT. */
-     /* This lets the following flow run the end of the scan. */
- rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_MAX);
- }
- }
-
- if (rtw_p2p_findphase_ex_is_needed(pwdinfo)) {
- /* Set the P2P State to the listen state of find phase and set the current channel to the listen channel */
- set_channel_bwmode(padapter, pwdinfo->listen_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
- rtw_p2p_set_state(pwdinfo, P2P_STATE_FIND_PHASE_LISTEN);
- pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
-
- /* restore RX GAIN */
- rtw_set_initial_gain(padapter, 0xff);
- /* turn on dynamic functions */
- Restore_DM_Func_Flag(padapter);
- /* Switch_DM_Func(padapter, DYNAMIC_FUNC_DIG|DYNAMIC_FUNC_HP|DYNAMIC_FUNC_SS, true); */
-
- _set_timer(&pwdinfo->find_phase_timer, (u32)((u32)(pwdinfo->listen_dwell) * 100));
- } else {
- /* 20100721:Interrupt scan operation here. */
- /* For SW antenna diversity before link, it needs to switch to another antenna and scan again. */
- /* It compares the scan result and selects a better one to do connection. */
- if (AntDivBeforeLink8188E(padapter)) {
- pmlmeext->sitesurvey_res.bss_cnt = 0;
- pmlmeext->sitesurvey_res.channel_idx = -1;
- pmlmeext->chan_scan_time = SURVEY_TO / 2;
- set_survey_timer(pmlmeext, pmlmeext->chan_scan_time);
- return;
- }
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_SCAN) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH))
- rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
- rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_NONE);
-
- pmlmeext->sitesurvey_res.state = SCAN_COMPLETE;
-
- /* switch back to the original channel */
-
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_LISTEN))
- set_channel_bwmode(padapter, pwdinfo->listen_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
- else
- set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
-
- /* config MSR */
- Set_MSR(padapter, (pmlmeinfo->state & 0x3));
-
- /* restore RX GAIN */
- rtw_set_initial_gain(padapter, 0xff);
- /* turn on dynamic functions */
- Restore_DM_Func_Flag(padapter);
- /* Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true); */
-
- if (r8188eu_is_client_associated_to_ap(padapter))
- issue_nulldata(padapter, NULL, 0, 3, 500);
-
- rtw_mlme_site_survey_done(padapter);
-
- report_surveydone_event(padapter);
-
- pmlmeext->chan_scan_time = SURVEY_TO;
- pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
-
- issue_action_BSSCoexistPacket(padapter);
- issue_action_BSSCoexistPacket(padapter);
- issue_action_BSSCoexistPacket(padapter);
- }
- }
-}
-
-/* collect bss info from Beacon and Probe request/response frames. */
-u8 collect_bss_info(struct adapter *padapter, struct recv_frame *precv_frame, struct wlan_bssid_ex *bssid)
-{
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data;
- int i;
- u32 len;
- u8 *p;
- u16 val16;
- u8 *pframe = precv_frame->rx_data;
- u32 packet_len = precv_frame->len;
- u8 ie_offset;
- struct registry_priv *pregistrypriv = &padapter->registrypriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- __le32 le32_tmp;
-
- len = packet_len - sizeof(struct ieee80211_hdr_3addr);
-
- if (len > MAX_IE_SZ)
- return _FAIL;
-
- memset(bssid, 0, sizeof(struct wlan_bssid_ex));
-
- if (ieee80211_is_beacon(mgmt->frame_control)) {
- bssid->Reserved[0] = 1;
- ie_offset = _BEACON_IE_OFFSET_;
- } else if (ieee80211_is_probe_req(mgmt->frame_control)) {
- ie_offset = _PROBEREQ_IE_OFFSET_;
- bssid->Reserved[0] = 2;
- } else if (ieee80211_is_probe_resp(mgmt->frame_control)) {
- ie_offset = _PROBERSP_IE_OFFSET_;
- bssid->Reserved[0] = 3;
- } else {
- bssid->Reserved[0] = 0;
- ie_offset = _FIXED_IE_LENGTH_;
- }
-
- bssid->Length = sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + len;
-
- /* below is to copy the information element */
- bssid->IELength = len;
- memcpy(bssid->IEs, (pframe + sizeof(struct ieee80211_hdr_3addr)), bssid->IELength);
-
- /* get the signal strength */
- bssid->Rssi = precv_frame->attrib.phy_info.recvpower; /* in dBm, raw data */
- bssid->PhyInfo.SignalQuality = precv_frame->attrib.phy_info.SignalQuality;/* in percentage */
- bssid->PhyInfo.SignalStrength = precv_frame->attrib.phy_info.SignalStrength;/* in percentage */
- bssid->PhyInfo.Optimum_antenna = rtw_current_antenna(padapter);
-
- /* checking SSID */
- p = rtw_get_ie(bssid->IEs + ie_offset, _SSID_IE_, &len, bssid->IELength - ie_offset);
- if (!p)
- return _FAIL;
-
- if (*(p + 1)) {
- if (len > NDIS_802_11_LENGTH_SSID)
- return _FAIL;
- memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
- bssid->Ssid.SsidLength = *(p + 1);
- } else {
- bssid->Ssid.SsidLength = 0;
- }
-
- memset(bssid->SupportedRates, 0, NDIS_802_11_LENGTH_RATES_EX);
-
- /* checking rate info... */
- i = 0;
- p = rtw_get_ie(bssid->IEs + ie_offset, _SUPPORTEDRATES_IE_, &len, bssid->IELength - ie_offset);
- if (p) {
- if (len > NDIS_802_11_LENGTH_RATES_EX)
- return _FAIL;
- memcpy(bssid->SupportedRates, (p + 2), len);
- i = len;
- }
-
- p = rtw_get_ie(bssid->IEs + ie_offset, _EXT_SUPPORTEDRATES_IE_, &len, bssid->IELength - ie_offset);
- if (p) {
- if (len > (NDIS_802_11_LENGTH_RATES_EX - i))
- return _FAIL;
- memcpy(bssid->SupportedRates + i, (p + 2), len);
- }
-
- if (bssid->IELength < 12)
- return _FAIL;
-
- /* Checking for DSConfig */
- p = rtw_get_ie(bssid->IEs + ie_offset, _DSSET_IE_, &len, bssid->IELength - ie_offset);
-
- bssid->Configuration.DSConfig = 0;
- bssid->Configuration.Length = 0;
-
- if (p) {
- bssid->Configuration.DSConfig = *(p + 2);
- } else {/* In 5 GHz, some APs do not have a DSSET IE */
- /* checking HT info for channel */
- p = rtw_get_ie(bssid->IEs + ie_offset, _HT_ADD_INFO_IE_, &len, bssid->IELength - ie_offset);
- if (p) {
- struct HT_info_element *HT_info = (struct HT_info_element *)(p + 2);
- bssid->Configuration.DSConfig = HT_info->primary_channel;
- } else { /* use current channel */
- bssid->Configuration.DSConfig = rtw_get_oper_ch(padapter);
- }
- }
-
- memcpy(&le32_tmp, rtw_get_beacon_interval_from_ie(bssid->IEs), 2);
- bssid->Configuration.BeaconPeriod = le32_to_cpu(le32_tmp);
-
- val16 = rtw_get_capability((struct wlan_bssid_ex *)bssid);
-
- if (val16 & BIT(0)) {
- bssid->InfrastructureMode = Ndis802_11Infrastructure;
- memcpy(bssid->MacAddress, GetAddr2Ptr(pframe), ETH_ALEN);
- } else {
- bssid->InfrastructureMode = Ndis802_11IBSS;
- memcpy(bssid->MacAddress, GetAddr3Ptr(pframe), ETH_ALEN);
- }
-
- if (val16 & BIT(4))
- bssid->Privacy = 1;
- else
- bssid->Privacy = 0;
-
- bssid->Configuration.ATIMWindow = 0;
-
- /* 20/40 BSS Coexistence check */
- if ((pregistrypriv->wifi_spec == 1) && (!pmlmeinfo->bwmode_updated)) {
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- p = rtw_get_ie(bssid->IEs + ie_offset, _HT_CAPABILITY_IE_, &len, bssid->IELength - ie_offset);
- if (p && len > 0) {
- struct HT_caps_element *pHT_caps;
- pHT_caps = (struct HT_caps_element *)(p + 2);
-
- if (le16_to_cpu(pHT_caps->u.HT_cap_element.HT_caps_info) & BIT(14))
- pmlmepriv->num_FortyMHzIntolerant++;
- } else {
- pmlmepriv->num_sta_no_ht++;
- }
- }
-
- /* mark bss info received from a nearby channel as SignalQuality 101 */
- if (bssid->Configuration.DSConfig != rtw_get_oper_ch(padapter))
- bssid->PhyInfo.SignalQuality = 101;
- return _SUCCESS;
-}
-
-static void rtw_set_bssid(struct adapter *adapter, u8 *bssid)
-{
- int i;
-
- for (i = 0; i < ETH_ALEN; i++)
- rtw_write8(adapter, REG_BSSID + i, bssid[i]);
-}
-
-static void mlme_join(struct adapter *adapter, int type)
-{
- struct mlme_priv *mlmepriv = &adapter->mlmepriv;
- u8 retry_limit = 0x30, reg;
- u32 reg32;
- int res;
-
- switch (type) {
- case 0:
- /* prepare to join */
- /* enable to rx data frame, accept all data frame */
- rtw_write16(adapter, REG_RXFLTMAP2, 0xFFFF);
-
- res = rtw_read32(adapter, REG_RCR, &reg32);
- if (res)
- return;
-
- rtw_write32(adapter, REG_RCR,
- reg32 | RCR_CBSSID_DATA | RCR_CBSSID_BCN);
-
- if (check_fwstate(mlmepriv, WIFI_STATION_STATE)) {
- retry_limit = 48;
- } else {
- /* ad-hoc mode */
- retry_limit = 0x7;
- }
- break;
- case 1:
- /* joinbss_event call back when join res < 0 */
- rtw_write16(adapter, REG_RXFLTMAP2, 0x00);
- break;
- case 2:
- /* sta add event call back */
- /* enable update TSF */
- res = rtw_read8(adapter, REG_BCN_CTRL, &reg);
- if (res)
- return;
-
- rtw_write8(adapter, REG_BCN_CTRL, reg & (~BIT(4)));
-
- if (check_fwstate(mlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE))
- retry_limit = 0x7;
- break;
- default:
- break;
- }
-
- rtw_write16(adapter, REG_RL,
- retry_limit << RETRY_LIMIT_SHORT_SHIFT | retry_limit << RETRY_LIMIT_LONG_SHIFT);
-}
-
-void start_create_ibss(struct adapter *padapter)
-{
- unsigned short caps;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&pmlmeinfo->network);
- pmlmeext->cur_channel = (u8)pnetwork->Configuration.DSConfig;
- pmlmeinfo->bcn_interval = get_beacon_interval(pnetwork);
-
- /* update wireless mode */
- update_wireless_mode(padapter);
-
- /* update capability */
- caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork);
- update_capinfo(padapter, caps);
- if (caps & cap_IBSS) {/* adhoc master */
- rtw_write8(padapter, REG_SECCFG, 0xcf);
-
- /* switch channel */
- /* SelectChannel(padapter, pmlmeext->cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE); */
- set_channel_bwmode(padapter, pmlmeext->cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
-
- beacon_timing_control(padapter);
-
- /* set msr to WIFI_FW_ADHOC_STATE */
- pmlmeinfo->state = WIFI_FW_ADHOC_STATE;
- Set_MSR(padapter, (pmlmeinfo->state & 0x3));
-
- /* issue beacon */
- if (send_beacon(padapter) == _FAIL) {
- report_join_res(padapter, -1);
- pmlmeinfo->state = WIFI_FW_NULL_STATE;
- } else {
- rtw_set_bssid(padapter, padapter->registrypriv.dev_network.MacAddress);
- mlme_join(padapter, 0);
-
- report_join_res(padapter, 1);
- pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
- rtw_indicate_connect(padapter);
- }
- } else {
- return;
- }
- /* update bc/mc sta_info */
- update_bmc_sta(padapter);
-}
-
-void start_clnt_join(struct adapter *padapter)
-{
- unsigned short caps;
- u8 val8;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&pmlmeinfo->network);
- int beacon_timeout;
-
- pmlmeext->cur_channel = (u8)pnetwork->Configuration.DSConfig;
- pmlmeinfo->bcn_interval = get_beacon_interval(pnetwork);
-
- /* update wireless mode */
- update_wireless_mode(padapter);
-
- /* update capability */
- caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork);
- update_capinfo(padapter, caps);
- if (caps & cap_ESS) {
- Set_MSR(padapter, WIFI_FW_STATION_STATE);
-
- val8 = (pmlmeinfo->auth_algo == dot11AuthAlgrthm_8021X) ? 0xcc : 0xcf;
-
- rtw_write8(padapter, REG_SECCFG, val8);
-
- /* switch channel */
- set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
-
- /* here wait for receiving the beacon to start auth */
- /* and enable a timer */
- beacon_timeout = decide_wait_for_beacon_timeout(pmlmeinfo->bcn_interval);
- set_link_timer(pmlmeext, beacon_timeout);
- _set_timer(&padapter->mlmepriv.assoc_timer,
- (REAUTH_TO * REAUTH_LIMIT) + (REASSOC_TO * REASSOC_LIMIT) + beacon_timeout);
-
- pmlmeinfo->state = WIFI_FW_AUTH_NULL | WIFI_FW_STATION_STATE;
- } else if (caps & cap_IBSS) { /* adhoc client */
- Set_MSR(padapter, WIFI_FW_ADHOC_STATE);
-
- rtw_write8(padapter, REG_SECCFG, 0xcf);
-
- /* switch channel */
- set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
-
- beacon_timing_control(padapter);
-
- pmlmeinfo->state = WIFI_FW_ADHOC_STATE;
-
- report_join_res(padapter, 1);
- } else {
- return;
- }
-}
-
-void start_clnt_auth(struct adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- _cancel_timer_ex(&pmlmeext->link_timer);
-
- pmlmeinfo->state &= (~WIFI_FW_AUTH_NULL);
- pmlmeinfo->state |= WIFI_FW_AUTH_STATE;
-
- pmlmeinfo->auth_seq = 1;
- pmlmeinfo->reauth_count = 0;
- pmlmeinfo->reassoc_count = 0;
- pmlmeinfo->link_count = 0;
- pmlmeext->retry = 0;
-
- /* Because the AP may not have received our earlier deauth, */
- /* it may: 1) not respond to auth or 2) deauth us after the link is complete. */
- /* Issue a deauth before issuing auth to deal with this situation. */
- /* Commented by Albert 2012/07/21 */
- /* For the Win8 P2P connection, it will be hard to connect successfully if this Wi-Fi device doesn't connect to it. */
- issue_deauth(padapter, (&pmlmeinfo->network)->MacAddress, WLAN_REASON_DEAUTH_LEAVING);
-
- issue_auth(padapter, NULL, 0);
-
- set_link_timer(pmlmeext, REAUTH_TO);
-}
-
-void start_clnt_assoc(struct adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- _cancel_timer_ex(&pmlmeext->link_timer);
-
- pmlmeinfo->state &= (~(WIFI_FW_AUTH_NULL | WIFI_FW_AUTH_STATE));
- pmlmeinfo->state |= (WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE);
-
- issue_assocreq(padapter);
-
- set_link_timer(pmlmeext, REASSOC_TO);
-}
-
-void receive_disconnect(struct adapter *padapter, unsigned char *MacAddr, unsigned short reason)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- /* check A3 */
- if (!(!memcmp(MacAddr, get_my_bssid(&pmlmeinfo->network), ETH_ALEN)))
- return;
-
- if ((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE) {
- if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {
- pmlmeinfo->state = WIFI_FW_NULL_STATE;
- report_del_sta_event(padapter, MacAddr, reason);
- } else if (pmlmeinfo->state & WIFI_FW_LINKING_STATE) {
- pmlmeinfo->state = WIFI_FW_NULL_STATE;
- report_join_res(padapter, -2);
- }
- }
-}
-
-static void process_80211d(struct adapter *padapter, struct wlan_bssid_ex *bssid)
-{
- struct registry_priv *pregistrypriv;
- struct mlme_ext_priv *pmlmeext;
- struct rt_channel_info *chplan_new;
- u8 channel;
- u8 i;
-
- pregistrypriv = &padapter->registrypriv;
- pmlmeext = &padapter->mlmeextpriv;
-
- /* Adjust channel plan by AP Country IE */
- if (pregistrypriv->enable80211d &&
- (!pmlmeext->update_channel_plan_by_ap_done)) {
- u8 *ie, *p;
- u32 len;
- struct rt_channel_plan chplan_ap;
- struct rt_channel_info chplan_sta[MAX_CHANNEL_NUM];
- u8 country[4];
- u8 fcn; /* first channel number */
- u8 noc; /* number of channel */
- u8 j, k;
-
- ie = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _COUNTRY_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
- if (!ie)
- return;
- if (len < 6)
- return;
- ie += 2;
- p = ie;
- ie += len;
-
- memset(country, 0, 4);
- memcpy(country, p, 3);
- p += 3;
-
- i = 0;
- while ((ie - p) >= 3) {
- fcn = *(p++);
- noc = *(p++);
- p++;
-
- for (j = 0; j < noc; j++) {
- channel = fcn + j;
- chplan_ap.Channel[i++] = channel;
- }
- }
- chplan_ap.Len = i;
-
- memcpy(chplan_sta, pmlmeext->channel_set, sizeof(chplan_sta));
-
- memset(pmlmeext->channel_set, 0, sizeof(pmlmeext->channel_set));
- chplan_new = pmlmeext->channel_set;
-
- i = 0;
- j = 0;
- k = 0;
- if (pregistrypriv->wireless_mode & WIRELESS_11G) {
- do {
- if ((i == MAX_CHANNEL_NUM) ||
- (chplan_sta[i].ChannelNum == 0))
- break;
-
- if (j == chplan_ap.Len)
- break;
-
- if (chplan_sta[i].ChannelNum == chplan_ap.Channel[j]) {
- chplan_new[k].ChannelNum = chplan_ap.Channel[j];
- chplan_new[k].ScanType = SCAN_ACTIVE;
- i++;
- j++;
- k++;
- } else if (chplan_sta[i].ChannelNum < chplan_ap.Channel[j]) {
- chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
- chplan_new[k].ScanType = SCAN_PASSIVE;
- i++;
- k++;
- } else if (chplan_sta[i].ChannelNum > chplan_ap.Channel[j]) {
- chplan_new[k].ChannelNum = chplan_ap.Channel[j];
- chplan_new[k].ScanType = SCAN_ACTIVE;
- j++;
- k++;
- }
- } while (1);
-
-    /* change channels the AP does not support to passive scan */
- while ((i < MAX_CHANNEL_NUM) &&
- (chplan_sta[i].ChannelNum != 0) &&
- (chplan_sta[i].ChannelNum <= 14)) {
- chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
- chplan_new[k].ScanType = SCAN_PASSIVE;
- i++;
- k++;
- }
-
-    /* add channels the AP supports */
- while ((j < chplan_ap.Len) && (chplan_ap.Channel[j] <= 14)) {
- chplan_new[k].ChannelNum = chplan_ap.Channel[j];
- chplan_new[k].ScanType = SCAN_ACTIVE;
- j++;
- k++;
- }
- } else {
- /* keep original STA 2.4G channel plan */
- while ((i < MAX_CHANNEL_NUM) &&
- (chplan_sta[i].ChannelNum != 0) &&
- (chplan_sta[i].ChannelNum <= 14)) {
- chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
- chplan_new[k].ScanType = chplan_sta[i].ScanType;
- i++;
- k++;
- }
-
- /* skip AP 2.4G channel plan */
- while ((j < chplan_ap.Len) && (chplan_ap.Channel[j] <= 14))
- j++;
- }
-
- /* keep original STA 5G channel plan */
- while ((i < MAX_CHANNEL_NUM) && (chplan_sta[i].ChannelNum != 0)) {
- chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
- chplan_new[k].ScanType = chplan_sta[i].ScanType;
- i++;
- k++;
- }
-
- pmlmeext->update_channel_plan_by_ap_done = 1;
- }
-
- /* If channel is used by AP, set channel scan type to active */
- channel = bssid->Configuration.DSConfig;
- chplan_new = pmlmeext->channel_set;
- i = 0;
- while ((i < MAX_CHANNEL_NUM) && (chplan_new[i].ChannelNum != 0)) {
- if (chplan_new[i].ChannelNum == channel) {
- if (chplan_new[i].ScanType == SCAN_PASSIVE)
- chplan_new[i].ScanType = SCAN_ACTIVE;
- break;
- }
- i++;
- }
-}
-
-/****************************************************************************
-
-Following are the functions to report events
-
-*****************************************************************************/
-
-void report_survey_event(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- struct cmd_obj *pcmd_obj;
- u8 *pevtcmd;
- u32 cmdsz;
- struct survey_event *psurvey_evt;
- struct C2HEvent_Header *pc2h_evt_hdr;
- struct mlme_ext_priv *pmlmeext;
- struct cmd_priv *pcmdpriv;
- /* u8 *pframe = precv_frame->rx_data; */
- /* uint len = precv_frame->len; */
-
- if (!padapter)
- return;
-
- pmlmeext = &padapter->mlmeextpriv;
- pcmdpriv = &padapter->cmdpriv;
-
- pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_ATOMIC);
- if (!pcmd_obj)
- return;
-
- cmdsz = (sizeof(struct survey_event) + sizeof(struct C2HEvent_Header));
- pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
- if (!pevtcmd) {
- kfree(pcmd_obj);
- return;
- }
-
- INIT_LIST_HEAD(&pcmd_obj->list);
-
- pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
- pcmd_obj->cmdsz = cmdsz;
- pcmd_obj->parmbuf = pevtcmd;
-
- pcmd_obj->rsp = NULL;
- pcmd_obj->rspsz = 0;
-
- pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
- pc2h_evt_hdr->len = sizeof(struct survey_event);
- pc2h_evt_hdr->ID = GEN_EVT_CODE(_Survey);
- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
-
- psurvey_evt = (struct survey_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
-
- if (collect_bss_info(padapter, precv_frame, (struct wlan_bssid_ex *)&psurvey_evt->bss) == _FAIL) {
- kfree(pcmd_obj);
- kfree(pevtcmd);
- return;
- }
-
- process_80211d(padapter, &psurvey_evt->bss);
-
- rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-
- pmlmeext->sitesurvey_res.bss_cnt++;
-}
-
-void report_surveydone_event(struct adapter *padapter)
-{
- struct cmd_obj *pcmd_obj;
- u8 *pevtcmd;
- u32 cmdsz;
- struct surveydone_event *psurveydone_evt;
- struct C2HEvent_Header *pc2h_evt_hdr;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
-
- pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_KERNEL);
- if (!pcmd_obj)
- return;
-
- cmdsz = (sizeof(struct surveydone_event) + sizeof(struct C2HEvent_Header));
- pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
- if (!pevtcmd) {
- kfree(pcmd_obj);
- return;
- }
-
- INIT_LIST_HEAD(&pcmd_obj->list);
-
- pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
- pcmd_obj->cmdsz = cmdsz;
- pcmd_obj->parmbuf = pevtcmd;
-
- pcmd_obj->rsp = NULL;
- pcmd_obj->rspsz = 0;
-
- pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
- pc2h_evt_hdr->len = sizeof(struct surveydone_event);
- pc2h_evt_hdr->ID = GEN_EVT_CODE(_SurveyDone);
- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
-
- psurveydone_evt = (struct surveydone_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
- psurveydone_evt->bss_cnt = pmlmeext->sitesurvey_res.bss_cnt;
-
- rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-}
-
-void report_join_res(struct adapter *padapter, int res)
-{
- struct cmd_obj *pcmd_obj;
- u8 *pevtcmd;
- u32 cmdsz;
- struct joinbss_event *pjoinbss_evt;
- struct C2HEvent_Header *pc2h_evt_hdr;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
-
- pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_ATOMIC);
- if (!pcmd_obj)
- return;
-
- cmdsz = (sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header));
- pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
- if (!pevtcmd) {
- kfree(pcmd_obj);
- return;
- }
-
- INIT_LIST_HEAD(&pcmd_obj->list);
-
- pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
- pcmd_obj->cmdsz = cmdsz;
- pcmd_obj->parmbuf = pevtcmd;
-
- pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
- pc2h_evt_hdr->len = sizeof(struct joinbss_event);
- pc2h_evt_hdr->ID = GEN_EVT_CODE(_JoinBss);
- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
-
- pjoinbss_evt = (struct joinbss_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
- memcpy((unsigned char *)(&pjoinbss_evt->network.network), &pmlmeinfo->network, sizeof(struct wlan_bssid_ex));
- pjoinbss_evt->network.join_res = res;
- pjoinbss_evt->network.aid = res;
-
- rtw_joinbss_event_prehandle(padapter, (u8 *)&pjoinbss_evt->network);
-
- rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-}
-
-void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsigned short reason)
-{
- struct cmd_obj *pcmd_obj;
- u8 *pevtcmd;
- u32 cmdsz;
- struct sta_info *psta;
- int mac_id;
- struct stadel_event *pdel_sta_evt;
- struct C2HEvent_Header *pc2h_evt_hdr;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
-
- pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_ATOMIC);
- if (!pcmd_obj)
- return;
-
- cmdsz = (sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header));
- pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
- if (!pevtcmd) {
- kfree(pcmd_obj);
- return;
- }
-
- INIT_LIST_HEAD(&pcmd_obj->list);
-
- pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
- pcmd_obj->cmdsz = cmdsz;
- pcmd_obj->parmbuf = pevtcmd;
-
- pcmd_obj->rsp = NULL;
- pcmd_obj->rspsz = 0;
-
- pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
- pc2h_evt_hdr->len = sizeof(struct stadel_event);
- pc2h_evt_hdr->ID = GEN_EVT_CODE(_DelSTA);
- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
-
- pdel_sta_evt = (struct stadel_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
- memcpy((unsigned char *)(&pdel_sta_evt->macaddr), MacAddr, ETH_ALEN);
- memcpy((unsigned char *)(pdel_sta_evt->rsvd), (unsigned char *)(&reason), 2);
-
- psta = rtw_get_stainfo(&padapter->stapriv, MacAddr);
- if (psta)
- mac_id = (int)psta->mac_id;
- else
- mac_id = (-1);
-
- pdel_sta_evt->mac_id = mac_id;
-
- rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-}
-
-void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int cam_idx)
-{
- struct cmd_obj *pcmd_obj;
- u8 *pevtcmd;
- u32 cmdsz;
- struct stassoc_event *padd_sta_evt;
- struct C2HEvent_Header *pc2h_evt_hdr;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
-
- pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_KERNEL);
- if (!pcmd_obj)
- return;
-
- cmdsz = (sizeof(struct stassoc_event) + sizeof(struct C2HEvent_Header));
- pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
- if (!pevtcmd) {
- kfree(pcmd_obj);
- return;
- }
-
- INIT_LIST_HEAD(&pcmd_obj->list);
-
- pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
- pcmd_obj->cmdsz = cmdsz;
- pcmd_obj->parmbuf = pevtcmd;
-
- pcmd_obj->rsp = NULL;
- pcmd_obj->rspsz = 0;
-
- pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
- pc2h_evt_hdr->len = sizeof(struct stassoc_event);
- pc2h_evt_hdr->ID = GEN_EVT_CODE(_AddSTA);
- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
-
- padd_sta_evt = (struct stassoc_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
- memcpy((unsigned char *)(&padd_sta_evt->macaddr), MacAddr, ETH_ALEN);
- padd_sta_evt->cam_id = cam_idx;
-
- rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-}
-
-/****************************************************************************
-
-Following are the event callback functions
-
-*****************************************************************************/
-
-/* for sta/adhoc mode */
-void update_sta_info(struct adapter *padapter, struct sta_info *psta)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- /* ERP */
- VCS_update(padapter, psta);
-
- /* HT */
- if (pmlmepriv->htpriv.ht_option) {
- psta->htpriv.ht_option = true;
-
- psta->htpriv.ampdu_enable = pmlmepriv->htpriv.ampdu_enable;
-
- if (support_short_GI(padapter, &pmlmeinfo->HT_caps))
- psta->htpriv.sgi = true;
-
- psta->qos_option = true;
- } else {
- psta->htpriv.ht_option = false;
-
- psta->htpriv.ampdu_enable = false;
-
- psta->htpriv.sgi = false;
- psta->qos_option = false;
- }
- psta->htpriv.bwmode = pmlmeext->cur_bwmode;
- psta->htpriv.ch_offset = pmlmeext->cur_ch_offset;
-
- psta->htpriv.agg_enable_bitmap = 0x0;/* reset */
- psta->htpriv.candidate_tid_bitmap = 0x0;/* reset */
-
- /* QoS */
- if (pmlmepriv->qospriv.qos_option)
- psta->qos_option = true;
-
- psta->state = _FW_LINKED;
-}
-
-static void rtw_reset_dm_func_flag(struct adapter *adapter)
-{
- struct hal_data_8188e *haldata = &adapter->haldata;
- struct dm_priv *dmpriv = &haldata->dmpriv;
- struct odm_dm_struct *odmpriv = &haldata->odmpriv;
-
- odmpriv->SupportAbility = dmpriv->InitODMFlag;
-}
-
-static void rtw_clear_dm_func_flag(struct adapter *adapter)
-{
- struct hal_data_8188e *haldata = &adapter->haldata;
- struct odm_dm_struct *odmpriv = &haldata->odmpriv;
-
- odmpriv->SupportAbility = 0;
-}
-
-void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
-{
- struct sta_info *psta, *psta_bmc;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
- struct sta_priv *pstapriv = &padapter->stapriv;
- u16 media_status;
-
- if (join_res < 0) {
- mlme_join(padapter, 1);
- rtw_set_bssid(padapter, null_addr);
-
- /* restore to initial setting. */
- update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
-
- return;
- }
-
- if ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) {
- /* for bc/mc */
- psta_bmc = rtw_get_bcmc_stainfo(padapter);
- if (psta_bmc) {
- pmlmeinfo->FW_sta_info[psta_bmc->mac_id].psta = psta_bmc;
- update_bmc_sta_support_rate(padapter, psta_bmc->mac_id);
- Update_RA_Entry(padapter, psta_bmc->mac_id);
- }
- }
-
- /* turn on dynamic functions */
- rtw_reset_dm_func_flag(padapter);
-
- /* update IOT-related issues */
- update_IOT_info(padapter);
-
- rtw_set_basic_rate(padapter, cur_network->SupportedRates);
-
- /* BCN interval */
- rtw_write16(padapter, REG_BCN_INTERVAL, pmlmeinfo->bcn_interval);
-
- /* update capability */
- update_capinfo(padapter, pmlmeinfo->capability);
-
- /* WMM, Update EDCA param */
- WMMOnAssocRsp(padapter);
-
- /* HT */
- HTOnAssocRsp(padapter);
-
- set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
-
- psta = rtw_get_stainfo(pstapriv, cur_network->MacAddress);
- if (psta) { /* only for infra. mode */
- pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;
-
- psta->wireless_mode = pmlmeext->cur_wireless_mode;
-
- /* set per sta rate after updating HT cap. */
- set_sta_rate(padapter, psta);
- rtw_set_max_rpt_macid(padapter, psta->mac_id);
-
- media_status = (psta->mac_id << 8) | 1; /* MACID|OPMODE: 1 means connect */
- rtl8188e_set_FwMediaStatus_cmd(padapter, media_status);
- }
-
- mlme_join(padapter, 2);
-
- if ((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE) {
- /* correcting TSF */
- correct_TSF(padapter);
- }
- rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_CONNECT, 0);
-}
-
-void mlmeext_sta_add_event_callback(struct adapter *padapter, struct sta_info *psta)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) {
- if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {/* adhoc master or sta_count>1 */
- /* nothing to do */
- } else { /* adhoc client */
- /* correcting TSF */
- correct_TSF(padapter);
-
- /* start beacon */
- if (send_beacon(padapter) == _FAIL) {
- pmlmeinfo->FW_sta_info[psta->mac_id].status = 0;
- pmlmeinfo->state ^= WIFI_FW_ADHOC_STATE;
- return;
- }
- pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
- }
- mlme_join(padapter, 2);
- }
-
- pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;
-
- /* rate adaptive */
- Update_RA_Entry(padapter, psta->mac_id);
-
- /* update adhoc sta_info */
- update_sta_info(padapter, psta);
-}
-
-static void mlme_disconnect(struct adapter *adapter)
-{
- int res;
- u8 reg;
-
- /* Set RCR not to receive data frames in the NO LINK state */
- /* reject all data frames */
- rtw_write16(adapter, REG_RXFLTMAP2, 0x00);
-
- /* reset TSF */
- rtw_write8(adapter, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
-
- /* disable update TSF */
-
- res = rtw_read8(adapter, REG_BCN_CTRL, &reg);
- if (res)
- return;
-
- rtw_write8(adapter, REG_BCN_CTRL, reg | BIT(4));
-}
-
-void mlmeext_sta_del_event_callback(struct adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (r8188eu_is_client_associated_to_ap(padapter) || r8188eu_is_ibss_empty(padapter)) {
- mlme_disconnect(padapter);
- rtw_set_bssid(padapter, null_addr);
-
- /* restore to initial setting. */
- update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
-
- /* switch to the 20M Hz mode after disconnect */
- pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
- pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
-
- /* SelectChannel(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset); */
- set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
-
- flush_all_cam_entry(padapter);
-
- pmlmeinfo->state = WIFI_FW_NULL_STATE;
-
- /* set MSR to no link state -> infra. mode */
- Set_MSR(padapter, _HW_STATE_STATION_);
-
- _cancel_timer_ex(&pmlmeext->link_timer);
- }
-}
-
-/****************************************************************************
-
-Following are the functions for the timer handlers
-
-*****************************************************************************/
-static u8 chk_ap_is_alive(struct sta_info *psta)
-{
- u8 ret = false;
-
- if ((sta_rx_data_pkts(psta) == sta_last_rx_data_pkts(psta)) &&
- sta_rx_beacon_pkts(psta) == sta_last_rx_beacon_pkts(psta) &&
- sta_rx_probersp_pkts(psta) == sta_last_rx_probersp_pkts(psta))
- ret = false;
- else
- ret = true;
-
- sta_update_last_rx_pkts(psta);
-
- return ret;
-}
-
-static int rtl8188e_sreset_linked_status_check(struct adapter *padapter)
-{
- u32 rx_dma_status;
- int res;
- u8 reg;
-
- res = rtw_read32(padapter, REG_RXDMA_STATUS, &rx_dma_status);
- if (res)
- return res;
-
- if (rx_dma_status != 0x00)
- rtw_write32(padapter, REG_RXDMA_STATUS, rx_dma_status);
-
- return rtw_read8(padapter, REG_FMETHR, &reg);
-}
-
-void linked_status_chk(struct adapter *padapter)
-{
- u32 i;
- struct sta_info *psta;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- rtl8188e_sreset_linked_status_check(padapter);
-
- if (r8188eu_is_client_associated_to_ap(padapter)) {
- /* linked infrastructure client mode */
-
- int tx_chk = _SUCCESS, rx_chk = _SUCCESS;
- int rx_chk_limit;
-
- rx_chk_limit = 4;
- psta = rtw_get_stainfo(pstapriv, pmlmeinfo->network.MacAddress);
- if (psta) {
- bool is_p2p_enable = false;
- is_p2p_enable = !rtw_p2p_chk_state(&padapter->wdinfo, P2P_STATE_NONE);
-
- if (!chk_ap_is_alive(psta))
- rx_chk = _FAIL;
-
- if (pxmitpriv->last_tx_pkts == pxmitpriv->tx_pkts)
- tx_chk = _FAIL;
-
- if (pmlmeext->active_keep_alive_check && (rx_chk == _FAIL || tx_chk == _FAIL)) {
- u8 backup_oper_channel = 0;
-
-     /* switch to the correct channel of the current network before issuing keep-alive frames */
- if (rtw_get_oper_ch(padapter) != pmlmeext->cur_channel) {
- backup_oper_channel = rtw_get_oper_ch(padapter);
- SelectChannel(padapter, pmlmeext->cur_channel);
- }
-
- if (rx_chk != _SUCCESS)
- issue_probereq_ex(padapter, &pmlmeinfo->network.Ssid, psta->hwaddr);
-
- if ((tx_chk != _SUCCESS && pmlmeinfo->link_count++ == 0xf) || rx_chk != _SUCCESS) {
- tx_chk = issue_nulldata(padapter, psta->hwaddr, 0, 3, 1);
- /* if tx acked and p2p disabled, set rx_chk _SUCCESS to reset retry count */
- if (tx_chk == _SUCCESS && !is_p2p_enable)
- rx_chk = _SUCCESS;
- }
-
- /* back to the original operation channel */
- if (backup_oper_channel > 0)
- SelectChannel(padapter, backup_oper_channel);
- } else {
- if (rx_chk != _SUCCESS) {
- if (pmlmeext->retry == 0) {
- issue_probereq(padapter, &pmlmeinfo->network.Ssid, pmlmeinfo->network.MacAddress);
- issue_probereq(padapter, &pmlmeinfo->network.Ssid, pmlmeinfo->network.MacAddress);
- issue_probereq(padapter, &pmlmeinfo->network.Ssid, pmlmeinfo->network.MacAddress);
- }
- }
-
- if (tx_chk != _SUCCESS && pmlmeinfo->link_count++ == 0xf) {
- tx_chk = issue_nulldata(padapter, NULL, 0, 1, 0);
- }
- }
-
- if (rx_chk == _FAIL) {
- pmlmeext->retry++;
- if (pmlmeext->retry > rx_chk_limit) {
- receive_disconnect(padapter, pmlmeinfo->network.MacAddress,
- WLAN_REASON_EXPIRATION_CHK);
- return;
- }
- } else {
- pmlmeext->retry = 0;
- }
-
- if (tx_chk == _FAIL) {
- pmlmeinfo->link_count &= 0xf;
- } else {
- pxmitpriv->last_tx_pkts = pxmitpriv->tx_pkts;
- pmlmeinfo->link_count = 0;
- }
- } /* end of if ((psta = rtw_get_stainfo(pstapriv, passoc_res->network.MacAddress)) != NULL) */
- } else if (r8188eu_is_client_associated_to_ibss(padapter)) {
- /* linked IBSS mode */
- /* for each assoc list entry to check the rx pkt counter */
- for (i = IBSS_START_MAC_ID; i < NUM_STA; i++) {
- if (pmlmeinfo->FW_sta_info[i].status == 1) {
- psta = pmlmeinfo->FW_sta_info[i].psta;
-
- if (psta == NULL)
- continue;
- if (pmlmeinfo->FW_sta_info[i].rx_pkt == sta_rx_pkts(psta)) {
- if (pmlmeinfo->FW_sta_info[i].retry < 3) {
- pmlmeinfo->FW_sta_info[i].retry++;
- } else {
- pmlmeinfo->FW_sta_info[i].retry = 0;
- pmlmeinfo->FW_sta_info[i].status = 0;
- report_del_sta_event(padapter, psta->hwaddr
- , 65535/* indicate disconnect caused by no rx */
- );
- }
- } else {
- pmlmeinfo->FW_sta_info[i].retry = 0;
- pmlmeinfo->FW_sta_info[i].rx_pkt = (u32)sta_rx_pkts(psta);
- }
- }
- }
- }
-}
-
-void survey_timer_hdl(struct adapter *padapter)
-{
- struct cmd_obj *ph2c;
- struct sitesurvey_parm *psurveyPara;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- /* issue rtw_sitesurvey_cmd */
- if (pmlmeext->sitesurvey_res.state > SCAN_START) {
- if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS)
- pmlmeext->sitesurvey_res.channel_idx++;
-
- if (pmlmeext->scan_abort) {
- if (!rtw_p2p_chk_state(&padapter->wdinfo, P2P_STATE_NONE)) {
- rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_MAX);
- pmlmeext->sitesurvey_res.channel_idx = 3;
- } else {
- pmlmeext->sitesurvey_res.channel_idx = pmlmeext->sitesurvey_res.ch_num;
- }
-
- pmlmeext->scan_abort = false;/* reset */
- }
-
- ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (!ph2c)
- goto exit_survey_timer_hdl;
-
- psurveyPara = kzalloc(sizeof(*psurveyPara), GFP_ATOMIC);
- if (!psurveyPara) {
- kfree(ph2c);
- goto exit_survey_timer_hdl;
- }
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara, GEN_CMD_CODE(_SiteSurvey));
- rtw_enqueue_cmd(pcmdpriv, ph2c);
- }
-
-exit_survey_timer_hdl:
- return;
-}
-
-void link_timer_hdl(struct adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (pmlmeinfo->state & WIFI_FW_AUTH_NULL) {
- pmlmeinfo->state = WIFI_FW_NULL_STATE;
- report_join_res(padapter, -3);
- } else if (pmlmeinfo->state & WIFI_FW_AUTH_STATE) {
- /* re-auth timer */
- if (++pmlmeinfo->reauth_count > REAUTH_LIMIT) {
- pmlmeinfo->state = 0;
- report_join_res(padapter, -1);
- return;
- }
-
- pmlmeinfo->auth_seq = 1;
- issue_auth(padapter, NULL, 0);
- set_link_timer(pmlmeext, REAUTH_TO);
- } else if (pmlmeinfo->state & WIFI_FW_ASSOC_STATE) {
- /* re-assoc timer */
- if (++pmlmeinfo->reassoc_count > REASSOC_LIMIT) {
- pmlmeinfo->state = WIFI_FW_NULL_STATE;
- report_join_res(padapter, -2);
- return;
- }
-
- issue_assocreq(padapter);
- set_link_timer(pmlmeext, REASSOC_TO);
- }
-}
-
-void addba_timer_hdl(struct sta_info *psta)
-{
- struct ht_priv *phtpriv;
-
- if (!psta)
- return;
-
- phtpriv = &psta->htpriv;
-
- if ((phtpriv->ht_option) && (phtpriv->ampdu_enable)) {
- if (phtpriv->candidate_tid_bitmap)
- phtpriv->candidate_tid_bitmap = 0x0;
- }
-}
-
-u8 NULL_hdl(struct adapter *padapter, u8 *pbuf)
-{
- return H2C_SUCCESS;
-}
-
-u8 setopmode_hdl(struct adapter *padapter, u8 *pbuf)
-{
- u8 type;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct setopmode_parm *psetop = (struct setopmode_parm *)pbuf;
-
- if (psetop->mode == Ndis802_11APMode) {
- pmlmeinfo->state = WIFI_FW_AP_STATE;
- type = _HW_STATE_AP_;
- } else if (psetop->mode == Ndis802_11Infrastructure) {
- pmlmeinfo->state &= ~(BIT(0) | BIT(1));/* clear state */
- pmlmeinfo->state |= WIFI_FW_STATION_STATE;/* set to STATION_STATE */
- type = _HW_STATE_STATION_;
- } else if (psetop->mode == Ndis802_11IBSS) {
- type = _HW_STATE_ADHOC_;
- } else {
- type = _HW_STATE_NOLINK_;
- }
-
- rtw_set_opmode(padapter, type);
-
- return H2C_SUCCESS;
-}
-
-u8 createbss_hdl(struct adapter *padapter, u8 *pbuf)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&pmlmeinfo->network);
- struct joinbss_parm *pparm = (struct joinbss_parm *)pbuf;
- /* u32 initialgain; */
-
- if (pparm->network.InfrastructureMode == Ndis802_11APMode) {
- if (pmlmeinfo->state == WIFI_FW_AP_STATE) {
- /* todo: */
- return H2C_SUCCESS;
- }
- }
-
- /* below is for ad-hoc master */
- if (pparm->network.InfrastructureMode == Ndis802_11IBSS) {
- rtw_joinbss_reset(padapter);
-
- pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
- pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- pmlmeinfo->ERP_enable = 0;
- pmlmeinfo->WMM_enable = 0;
- pmlmeinfo->HT_enable = 0;
- pmlmeinfo->HT_caps_enable = 0;
- pmlmeinfo->HT_info_enable = 0;
- pmlmeinfo->agg_enable_bitmap = 0;
- pmlmeinfo->candidate_tid_bitmap = 0;
-
- /* disable dynamic functions, such as high power, DIG */
- Save_DM_Func_Flag(padapter);
- rtw_clear_dm_func_flag(padapter);
-
- /* cancel link timer */
- _cancel_timer_ex(&pmlmeext->link_timer);
-
- /* clear CAM */
- flush_all_cam_entry(padapter);
-
- memcpy(pnetwork, pbuf, offsetof(struct wlan_bssid_ex, IELength));
- pnetwork->IELength = ((struct wlan_bssid_ex *)pbuf)->IELength;
-
- if (pnetwork->IELength > MAX_IE_SZ)/* Check pbuf->IELength */
- return H2C_PARAMETERS_ERROR;
-
- memcpy(pnetwork->IEs, ((struct wlan_bssid_ex *)pbuf)->IEs, pnetwork->IELength);
-
- start_create_ibss(padapter);
- }
-
- return H2C_SUCCESS;
-}
-
-u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
-{
- struct ndis_802_11_var_ie *pIE;
- struct registry_priv *pregpriv = &padapter->registrypriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&pmlmeinfo->network);
- struct joinbss_parm *pparm = (struct joinbss_parm *)pbuf;
- u32 i;
-
- /* check already connecting to AP or not */
- if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {
- if (pmlmeinfo->state & WIFI_FW_STATION_STATE)
- issue_deauth_ex(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, 5, 100);
-
- pmlmeinfo->state = WIFI_FW_NULL_STATE;
-
- /* clear CAM */
- flush_all_cam_entry(padapter);
-
- _cancel_timer_ex(&pmlmeext->link_timer);
-
- /* set MSR to nolink -> infra. mode */
- Set_MSR(padapter, _HW_STATE_STATION_);
-
- mlme_disconnect(padapter);
- }
-
- rtw_antenna_select_cmd(padapter, pparm->network.PhyInfo.Optimum_antenna, false);
-
- rtw_joinbss_reset(padapter);
-
- pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
- pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- pmlmeinfo->ERP_enable = 0;
- pmlmeinfo->WMM_enable = 0;
- pmlmeinfo->HT_enable = 0;
- pmlmeinfo->HT_caps_enable = 0;
- pmlmeinfo->HT_info_enable = 0;
- pmlmeinfo->agg_enable_bitmap = 0;
- pmlmeinfo->candidate_tid_bitmap = 0;
- pmlmeinfo->bwmode_updated = false;
-
- memcpy(pnetwork, pbuf, offsetof(struct wlan_bssid_ex, IELength));
- pnetwork->IELength = ((struct wlan_bssid_ex *)pbuf)->IELength;
-
- if (pnetwork->IELength > MAX_IE_SZ)/* Check pbuf->IELength */
- return H2C_PARAMETERS_ERROR;
-
- memcpy(pnetwork->IEs, ((struct wlan_bssid_ex *)pbuf)->IEs, pnetwork->IELength);
-
- /* Check AP vendor to move rtw_joinbss_cmd() */
-
- for (i = sizeof(struct ndis_802_11_fixed_ie); i < pnetwork->IELength;) {
- pIE = (struct ndis_802_11_var_ie *)(pnetwork->IEs + i);
-
- switch (pIE->ElementID) {
- case _VENDOR_SPECIFIC_IE_:/* Get WMM IE. */
- if (!memcmp(pIE->data, WMM_OUI, 4))
- pmlmeinfo->WMM_enable = 1;
- break;
- case _HT_CAPABILITY_IE_: /* Get HT Cap IE. */
- pmlmeinfo->HT_caps_enable = 1;
- break;
- case _HT_EXTRA_INFO_IE_: /* Get HT Info IE. */
- pmlmeinfo->HT_info_enable = 1;
-
-    /* special case only for Cisco APs because they issue assoc rsp using an MCS rate @40MHz or @20MHz */
- {
- struct HT_info_element *pht_info = (struct HT_info_element *)(pIE->data);
-
- if ((pregpriv->cbw40_enable) && (pht_info->infos[0] & BIT(2))) {
- /* switch to the 40M Hz mode according to the AP */
- pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_40;
- switch (pht_info->infos[0] & 0x3) {
- case 1:
- pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
- break;
- case 3:
- pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
- break;
- default:
- pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- break;
- }
- }
- }
- break;
- default:
- break;
- }
-
- i += (pIE->Length + 2);
- }
- /* disable dynamic functions, such as high power, DIG */
-
- /* config the initial gain under linking, need to write the BB registers */
-
- rtw_set_bssid(padapter, pmlmeinfo->network.MacAddress);
- mlme_join(padapter, 0);
-
- /* cancel link timer */
- _cancel_timer_ex(&pmlmeext->link_timer);
-
- start_clnt_join(padapter);
-
- return H2C_SUCCESS;
-}
-
-u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf)
-{
- struct disconnect_parm *param = (struct disconnect_parm *)pbuf;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&pmlmeinfo->network);
- u8 val8;
- int res;
-
- if (r8188eu_is_client_associated_to_ap(padapter))
- issue_deauth_ex(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms / 100, 100);
-
- mlme_disconnect(padapter);
- rtw_set_bssid(padapter, null_addr);
-
- /* restore to initial setting. */
- update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
-
- if (((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)) {
- /* Stop BCN */
- res = rtw_read8(padapter, REG_BCN_CTRL, &val8);
- if (res)
- return H2C_DROPPED;
-
- rtw_write8(padapter, REG_BCN_CTRL, val8 & (~(EN_BCN_FUNCTION | EN_TXBCN_RPT)));
- }
-
- /* set MSR to no link state -> infra. mode */
- Set_MSR(padapter, _HW_STATE_STATION_);
-
- pmlmeinfo->state = WIFI_FW_NULL_STATE;
-
- /* switch to the 20M Hz mode after disconnect */
- pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
- pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
-
- set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
-
- flush_all_cam_entry(padapter);
-
- _cancel_timer_ex(&pmlmeext->link_timer);
-
- rtw_free_uc_swdec_pending_queue(padapter);
-
- return H2C_SUCCESS;
-}
-
-static int rtw_scan_ch_decision(struct adapter *padapter, struct rtw_ieee80211_channel *out,
- u32 out_num, struct rtw_ieee80211_channel *in, u32 in_num)
-{
- int i, j;
- int set_idx;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- /* clear out first */
- memset(out, 0, sizeof(struct rtw_ieee80211_channel) * out_num);
-
- /* acquire channels from in */
- j = 0;
- for (i = 0; i < in_num; i++) {
- set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, in[i].hw_value);
- if (in[i].hw_value && !(in[i].flags & RTW_IEEE80211_CHAN_DISABLED) &&
- set_idx >= 0) {
- memcpy(&out[j], &in[i], sizeof(struct rtw_ieee80211_channel));
-
- if (pmlmeext->channel_set[set_idx].ScanType == SCAN_PASSIVE)
- out[j].flags &= RTW_IEEE80211_CHAN_PASSIVE_SCAN;
-
- j++;
- }
- if (j >= out_num)
- break;
- }
-
- /* if out is empty, use channel_set as default */
- if (j == 0) {
- for (i = 0; i < pmlmeext->max_chan_nums; i++) {
- out[i].hw_value = pmlmeext->channel_set[i].ChannelNum;
-
- if (pmlmeext->channel_set[i].ScanType == SCAN_PASSIVE)
- out[i].flags &= RTW_IEEE80211_CHAN_PASSIVE_SCAN;
-
- j++;
- }
- }
-
- return j;
-}
-
-u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct sitesurvey_parm *pparm = (struct sitesurvey_parm *)pbuf;
- u8 bdelayscan = false;
- u32 i;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- if (pmlmeext->sitesurvey_res.state == SCAN_DISABLE) {
- /* for first time sitesurvey_cmd */
-
- pmlmeext->sitesurvey_res.state = SCAN_START;
- pmlmeext->sitesurvey_res.bss_cnt = 0;
- pmlmeext->sitesurvey_res.channel_idx = 0;
-
- for (i = 0; i < RTW_SSID_SCAN_AMOUNT; i++) {
- if (pparm->ssid[i].SsidLength) {
- memcpy(pmlmeext->sitesurvey_res.ssid[i].Ssid, pparm->ssid[i].Ssid, IW_ESSID_MAX_SIZE);
- pmlmeext->sitesurvey_res.ssid[i].SsidLength = pparm->ssid[i].SsidLength;
- } else {
- pmlmeext->sitesurvey_res.ssid[i].SsidLength = 0;
- }
- }
-
- pmlmeext->sitesurvey_res.ch_num = rtw_scan_ch_decision(padapter
- , pmlmeext->sitesurvey_res.ch, RTW_CHANNEL_SCAN_AMOUNT
- , pparm->ch, pparm->ch_num
- );
-
- pmlmeext->sitesurvey_res.scan_mode = pparm->scan_mode;
-
-		/* issue null data if associated to the AP */
- if (r8188eu_is_client_associated_to_ap(padapter)) {
- pmlmeext->sitesurvey_res.state = SCAN_TXNULL;
-
- issue_nulldata(padapter, NULL, 1, 3, 500);
-
- bdelayscan = true;
- }
- if (bdelayscan) {
- /* delay 50ms to protect nulldata(1). */
- set_survey_timer(pmlmeext, 50);
- return H2C_SUCCESS;
- }
- }
-
- if ((pmlmeext->sitesurvey_res.state == SCAN_START) || (pmlmeext->sitesurvey_res.state == SCAN_TXNULL)) {
- /* disable dynamic functions, such as high power, DIG */
- Save_DM_Func_Flag(padapter);
- rtw_clear_dm_func_flag(padapter);
-
- /* config the initial gain under scanning, need to write the BB registers */
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
- rtw_set_initial_gain(padapter, 0x1e);
- else
- rtw_set_initial_gain(padapter, 0x28);
-
-
- /* set MSR to no link state */
- Set_MSR(padapter, _HW_STATE_NOLINK_);
-
- rtw_mlme_under_site_survey(padapter);
-
- pmlmeext->sitesurvey_res.state = SCAN_PROCESS;
- }
-
- site_survey(padapter);
-
- return H2C_SUCCESS;
-}
-
-u8 setauth_hdl(struct adapter *padapter, unsigned char *pbuf)
-{
- struct setauth_parm *pparm = (struct setauth_parm *)pbuf;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (pparm->mode < 4)
- pmlmeinfo->auth_algo = pparm->mode;
- return H2C_SUCCESS;
-}
-
-u8 setkey_hdl(struct adapter *padapter, u8 *pbuf)
-{
- unsigned short ctrl;
- struct setkey_parm *pparm = (struct setkey_parm *)pbuf;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- unsigned char null_sta[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-
- /* main tx key for wep. */
- if (pparm->set_tx)
- pmlmeinfo->key_index = pparm->keyid;
-
- /* write cam */
- ctrl = BIT(15) | ((pparm->algorithm) << 2) | pparm->keyid;
-
- write_cam(padapter, pparm->keyid, ctrl, null_sta, pparm->key);
-
- return H2C_SUCCESS;
-}
-
-u8 set_stakey_hdl(struct adapter *padapter, u8 *pbuf)
-{
- u16 ctrl = 0;
- u8 cam_id;/* cam_entry */
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct set_stakey_parm *pparm = (struct set_stakey_parm *)pbuf;
-
- /* cam_entry: */
- /* 0~3 for default key */
-
- /* for concurrent mode (ap+sta): */
-	/* default key is disabled, using sw encrypt/decrypt */
- /* cam_entry = 4 for sta mode (macid = 0) */
- /* cam_entry(macid+3) = 5 ~ N for ap mode (aid = 1~N, macid = 2 ~N) */
-
- /* for concurrent mode (sta+sta): */
-	/* default key is disabled, using sw encrypt/decrypt */
- /* cam_entry = 4 mapping to macid = 0 */
- /* cam_entry = 5 mapping to macid = 2 */
-
- cam_id = 4;
-
- if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) {
- struct sta_info *psta;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if (pparm->algorithm == _NO_PRIVACY_) /* clear cam entry */ {
- clear_cam_entry(padapter, pparm->id);
- return H2C_SUCCESS_RSP;
- }
-
- psta = rtw_get_stainfo(pstapriv, pparm->addr);
- if (psta) {
- ctrl = (BIT(15) | ((pparm->algorithm) << 2));
-
- if ((psta->mac_id < 1) || (psta->mac_id > (NUM_STA - 4)))
- return H2C_REJECTED;
-
- cam_id = (psta->mac_id + 3);/* 0~3 for default key, cmd_id = macid + 3, macid = aid+1; */
-
- write_cam(padapter, cam_id, ctrl, pparm->addr, pparm->key);
-
- return H2C_SUCCESS_RSP;
- } else {
- return H2C_REJECTED;
- }
- }
-
- /* below for sta mode */
-
- if (pparm->algorithm == _NO_PRIVACY_) { /* clear cam entry */
- clear_cam_entry(padapter, pparm->id);
- return H2C_SUCCESS;
- }
- ctrl = BIT(15) | ((pparm->algorithm) << 2);
- write_cam(padapter, cam_id, ctrl, pparm->addr, pparm->key);
- pmlmeinfo->enc_algo = pparm->algorithm;
- return H2C_SUCCESS;
-}
-
-u8 add_ba_hdl(struct adapter *padapter, unsigned char *pbuf)
-{
- struct addBaReq_parm *pparm = (struct addBaReq_parm *)pbuf;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, pparm->addr);
-
- if (!psta)
- return H2C_SUCCESS;
-
- if (((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) && (pmlmeinfo->HT_enable)) ||
- ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)) {
- issue_action_BA(padapter, pparm->addr, WLAN_ACTION_ADDBA_REQ, (u16)pparm->tid, NULL);
- _set_timer(&psta->addba_retry_timer, ADDBA_TO);
- } else {
- psta->htpriv.candidate_tid_bitmap &= ~BIT(pparm->tid);
- }
- return H2C_SUCCESS;
-}
-
-u8 set_tx_beacon_cmd(struct adapter *padapter)
-{
- struct cmd_obj *ph2c;
- struct Tx_Beacon_param *ptxBeacon_parm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- u8 res = _SUCCESS;
- int len_diff = 0;
-
- ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- ptxBeacon_parm = kzalloc(sizeof(*ptxBeacon_parm), GFP_ATOMIC);
- if (!ptxBeacon_parm) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- memcpy(&ptxBeacon_parm->network, &pmlmeinfo->network, sizeof(struct wlan_bssid_ex));
-
- len_diff = update_hidden_ssid(ptxBeacon_parm->network.IEs + _BEACON_IE_OFFSET_,
- ptxBeacon_parm->network.IELength - _BEACON_IE_OFFSET_,
- pmlmeinfo->hidden_ssid_mode);
- ptxBeacon_parm->network.IELength += len_diff;
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, ptxBeacon_parm, GEN_CMD_CODE(_TX_Beacon));
-
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-
-exit:
-
- return res;
-}
-
-u8 mlme_evt_hdl(struct adapter *padapter, unsigned char *pbuf)
-{
- u8 evt_code;
- u16 evt_sz;
- uint *peventbuf;
- void (*event_callback)(struct adapter *dev, u8 *pbuf);
- struct evt_priv *pevt_priv = &padapter->evtpriv;
-
- peventbuf = (uint *)pbuf;
- evt_sz = (u16)(*peventbuf & 0xffff);
- evt_code = (u8)((*peventbuf >> 16) & 0xff);
-
- /* checking if event code is valid */
- if (evt_code >= MAX_C2HEVT)
- goto _abort_event_;
-
- /* checking if event size match the event parm size */
- if ((wlanevents[evt_code].parmsize != 0) &&
- (wlanevents[evt_code].parmsize != evt_sz))
- goto _abort_event_;
-
- atomic_inc(&pevt_priv->event_seq);
-
- peventbuf += 2;
-
- if (peventbuf) {
- event_callback = wlanevents[evt_code].event_callback;
- event_callback(padapter, (u8 *)peventbuf);
- }
-
-_abort_event_:
- return H2C_SUCCESS;
-}
-
-u8 h2c_msg_hdl(struct adapter *padapter, unsigned char *pbuf)
-{
- if (!pbuf)
- return H2C_PARAMETERS_ERROR;
-
- return H2C_SUCCESS;
-}
-
-u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf)
-{
- if (send_beacon(padapter) == _FAIL) {
- return H2C_PARAMETERS_ERROR;
- } else {
- /* tx bc/mc frames after update TIM */
- struct sta_info *psta_bmc;
- struct list_head *xmitframe_plist, *xmitframe_phead;
- struct xmit_frame *pxmitframe = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- /* for BC/MC Frames */
- psta_bmc = rtw_get_bcmc_stainfo(padapter);
- if (!psta_bmc)
- return H2C_SUCCESS;
-
- if ((pstapriv->tim_bitmap & BIT(0)) && (psta_bmc->sleepq_len > 0)) {
-			msleep(10);/* 10ms, ATIM(HIQ) window */
- spin_lock_bh(&psta_bmc->sleep_q.lock);
-
- xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
- xmitframe_plist = xmitframe_phead->next;
-
- while (xmitframe_phead != xmitframe_plist) {
- pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
-
- xmitframe_plist = xmitframe_plist->next;
-
- list_del_init(&pxmitframe->list);
-
- psta_bmc->sleepq_len--;
- if (psta_bmc->sleepq_len > 0)
- pxmitframe->attrib.mdata = 1;
- else
- pxmitframe->attrib.mdata = 0;
-
- pxmitframe->attrib.triggered = 1;
-
- pxmitframe->attrib.qsel = 0x11;/* HIQ */
-
- spin_unlock_bh(&psta_bmc->sleep_q.lock);
- if (rtl8188eu_hal_xmit(padapter, pxmitframe))
- rtw_xmit_complete(padapter, pxmitframe);
- spin_lock_bh(&psta_bmc->sleep_q.lock);
- }
- spin_unlock_bh(&psta_bmc->sleep_q.lock);
- }
- }
- return H2C_SUCCESS;
-}
-
-u8 set_ch_hdl(struct adapter *padapter, u8 *pbuf)
-{
- struct set_ch_parm *set_ch_parm;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- if (!pbuf)
- return H2C_PARAMETERS_ERROR;
-
- set_ch_parm = (struct set_ch_parm *)pbuf;
-
- pmlmeext->cur_channel = set_ch_parm->ch;
- pmlmeext->cur_ch_offset = set_ch_parm->ch_offset;
- pmlmeext->cur_bwmode = set_ch_parm->bw;
-
- set_channel_bwmode(padapter, set_ch_parm->ch, set_ch_parm->ch_offset, set_ch_parm->bw);
-
- return H2C_SUCCESS;
-}
-
-u8 set_chplan_hdl(struct adapter *padapter, unsigned char *pbuf)
-{
- struct SetChannelPlan_param *setChannelPlan_param;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- if (!pbuf)
- return H2C_PARAMETERS_ERROR;
-
- setChannelPlan_param = (struct SetChannelPlan_param *)pbuf;
-
- pmlmeext->max_chan_nums = init_channel_set(padapter, setChannelPlan_param->channel_plan, pmlmeext->channel_set);
- init_channel_list(padapter, pmlmeext->channel_set, pmlmeext->max_chan_nums, &pmlmeext->channel_list);
-
- return H2C_SUCCESS;
-}
-
-u8 led_blink_hdl(struct adapter *padapter, unsigned char *pbuf)
-{
- if (!pbuf)
- return H2C_PARAMETERS_ERROR;
- return H2C_SUCCESS;
-}
-
-u8 set_csa_hdl(struct adapter *padapter, unsigned char *pbuf)
-{
- return H2C_REJECTED;
-}
-
-/* TDLS_WRCR : write RCR DATA BIT */
-/* TDLS_SD_PTI : issue peer traffic indication */
-/* TDLS_CS_OFF : go back to the channel linked with AP, terminating channel switch procedure */
-/* TDLS_INIT_CH_SEN : init channel sensing, receive all data and mgnt frame */
-/* TDLS_DONE_CH_SEN: channel sensing and report candidate channel */
-/* TDLS_OFF_CH : first time set channel to off channel */
-/* TDLS_BASE_CH : go back to the channel linked with AP when the base channel is set as the target channel */
-/* TDLS_P_OFF_CH : periodically go to off channel */
-/* TDLS_P_BASE_CH : periodically go back to base channel */
-/* TDLS_RS_RCR : restore RCR */
-/* TDLS_CKALV_PH1 : check alive timer phase1 */
-/* TDLS_CKALV_PH2 : check alive timer phase2 */
-/* TDLS_FREE_STA : free tdls sta */
-u8 tdls_hdl(struct adapter *padapter, unsigned char *pbuf)
-{
- return H2C_REJECTED;
-}
diff --git a/drivers/staging/r8188eu/core/rtw_p2p.c b/drivers/staging/r8188eu/core/rtw_p2p.c
deleted file mode 100644
index 93d3c9c4399c..000000000000
--- a/drivers/staging/r8188eu/core/rtw_p2p.c
+++ /dev/null
@@ -1,1918 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _RTW_P2P_C_
-
-#include "../include/drv_types.h"
-#include "../include/rtw_p2p.h"
-#include "../include/wifi.h"
-
-static int rtw_p2p_is_channel_list_ok(u8 desired_ch, u8 *ch_list, u8 ch_cnt)
-{
- int found = 0, i = 0;
-
- for (i = 0; i < ch_cnt; i++) {
- if (ch_list[i] == desired_ch) {
- found = 1;
- break;
- }
- }
- return found;
-}
-
-static u32 go_add_group_info_attr(struct wifidirect_info *pwdinfo, u8 *pbuf)
-{
- struct list_head *phead, *plist;
- u32 len = 0;
- u16 attr_len = 0;
- u8 tmplen, *pdata_attr, *pstart, *pcur;
- struct sta_info *psta = NULL;
- struct adapter *padapter = pwdinfo->padapter;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- pdata_attr = kzalloc(MAX_P2P_IE_LEN, GFP_KERNEL);
-
- pstart = pdata_attr;
- pcur = pdata_attr;
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- phead = &pstapriv->asoc_list;
- plist = phead->next;
-
- /* look up sta asoc_queue */
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, asoc_list);
-
- plist = plist->next;
-
- if (psta->is_p2p_device) {
- tmplen = 0;
-
- pcur++;
-
- /* P2P device address */
- memcpy(pcur, psta->dev_addr, ETH_ALEN);
- pcur += ETH_ALEN;
-
- /* P2P interface address */
- memcpy(pcur, psta->hwaddr, ETH_ALEN);
- pcur += ETH_ALEN;
-
- *pcur = psta->dev_cap;
- pcur++;
-
- /* u16*)(pcur) = cpu_to_be16(psta->config_methods); */
- RTW_PUT_BE16(pcur, psta->config_methods);
- pcur += 2;
-
- memcpy(pcur, psta->primary_dev_type, 8);
- pcur += 8;
-
- *pcur = psta->num_of_secdev_type;
- pcur++;
-
- memcpy(pcur, psta->secdev_types_list, psta->num_of_secdev_type * 8);
- pcur += psta->num_of_secdev_type * 8;
-
- if (psta->dev_name_len > 0) {
- /* u16*)(pcur) = cpu_to_be16(WPS_ATTR_DEVICE_NAME); */
- RTW_PUT_BE16(pcur, WPS_ATTR_DEVICE_NAME);
- pcur += 2;
-
- /* u16*)(pcur) = cpu_to_be16(psta->dev_name_len); */
- RTW_PUT_BE16(pcur, psta->dev_name_len);
- pcur += 2;
-
- memcpy(pcur, psta->dev_name, psta->dev_name_len);
- pcur += psta->dev_name_len;
- }
-
- tmplen = (u8)(pcur - pstart);
-
- *pstart = (tmplen - 1);
-
- attr_len += tmplen;
-
- /* pstart += tmplen; */
- pstart = pcur;
- }
- }
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-
- if (attr_len > 0)
- len = rtw_set_p2p_attr_content(pbuf, P2P_ATTR_GROUP_INFO, attr_len, pdata_attr);
-
- kfree(pdata_attr);
- return len;
-}
-
-static void issue_group_disc_req(struct wifidirect_info *pwdinfo, u8 *da)
-{
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- struct adapter *padapter = pwdinfo->padapter;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- unsigned char category = RTW_WLAN_CATEGORY_P2P;/* P2P action frame */
- __be32 p2poui = cpu_to_be32(P2POUI);
- u8 oui_subtype = P2P_GO_DISC_REQUEST;
- u8 dialogToken = 0;
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
-
- memcpy(pwlanhdr->addr1, da, ETH_ALEN);
- memcpy(pwlanhdr->addr2, pwdinfo->interface_addr, ETH_ALEN);
- memcpy(pwlanhdr->addr3, pwdinfo->interface_addr, ETH_ALEN);
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_ACTION);
-
- pframe += sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
-
- /* Build P2P action frame header */
- pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&p2poui, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &oui_subtype, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &dialogToken, &pattrib->pktlen);
-
- /* there is no IE in this P2P action frame */
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- dump_mgntframe(padapter, pmgntframe);
-}
-
-static void issue_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *da, u8 status, u8 dialogToken)
-{
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- struct adapter *padapter = pwdinfo->padapter;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- unsigned char category = WLAN_CATEGORY_PUBLIC;
- u8 action = P2P_PUB_ACTION_ACTION;
- __be32 p2poui = cpu_to_be32(P2POUI);
- u8 oui_subtype = P2P_DEVDISC_RESP;
- u8 p2pie[8] = { 0x00 };
- u32 p2pielen = 0;
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
-
- memcpy(pwlanhdr->addr1, da, ETH_ALEN);
- memcpy(pwlanhdr->addr2, pwdinfo->device_addr, ETH_ALEN);
- memcpy(pwlanhdr->addr3, pwdinfo->device_addr, ETH_ALEN);
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_ACTION);
-
- pframe += sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
-
- /* Build P2P public action frame header */
- pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&p2poui, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &oui_subtype, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &dialogToken, &pattrib->pktlen);
-
- /* Build P2P IE */
- /* P2P OUI */
- p2pielen = 0;
- p2pie[p2pielen++] = 0x50;
- p2pie[p2pielen++] = 0x6F;
- p2pie[p2pielen++] = 0x9A;
- p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
-
- /* P2P_ATTR_STATUS */
- p2pielen += rtw_set_p2p_attr_content(&p2pie[p2pielen], P2P_ATTR_STATUS, 1, &status);
-
- pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, p2pie, &pattrib->pktlen);
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- dump_mgntframe(padapter, pmgntframe);
-}
-
-static void issue_p2p_provision_resp(struct wifidirect_info *pwdinfo, u8 *raddr, u8 *frame_body, u16 config_method)
-{
- struct adapter *padapter = pwdinfo->padapter;
- unsigned char category = WLAN_CATEGORY_PUBLIC;
- u8 action = P2P_PUB_ACTION_ACTION;
- u8 dialogToken = frame_body[7]; /* The Dialog Token of provisioning discovery request frame. */
- __be32 p2poui = cpu_to_be32(P2POUI);
- u8 oui_subtype = P2P_PROVISION_DISC_RESP;
- u8 wpsie[100] = { 0x00 };
- u8 wpsielen = 0;
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
-
- memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, myid(&padapter->eeprompriv), ETH_ALEN);
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_ACTION);
-
- pframe += sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
-
- pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&p2poui, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &oui_subtype, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &dialogToken, &pattrib->pktlen);
-
- wpsielen = 0;
- /* WPS OUI */
- RTW_PUT_BE32(wpsie, WPSOUI);
- wpsielen += 4;
-
- /* Config Method */
- /* Type: */
- RTW_PUT_BE16(wpsie + wpsielen, WPS_ATTR_CONF_METHOD);
- wpsielen += 2;
-
- /* Length: */
- RTW_PUT_BE16(wpsie + wpsielen, 0x0002);
- wpsielen += 2;
-
- /* Value: */
- RTW_PUT_BE16(wpsie + wpsielen, config_method);
- wpsielen += 2;
-
- pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, wpsielen, (unsigned char *)wpsie, &pattrib->pktlen);
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- dump_mgntframe(padapter, pmgntframe);
-}
-
-static void issue_p2p_presence_resp(struct wifidirect_info *pwdinfo, u8 *da, u8 status, u8 dialogToken)
-{
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- struct adapter *padapter = pwdinfo->padapter;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- unsigned char category = RTW_WLAN_CATEGORY_P2P;/* P2P action frame */
- __be32 p2poui = cpu_to_be32(P2POUI);
- u8 oui_subtype = P2P_PRESENCE_RESPONSE;
- u8 p2pie[MAX_P2P_IE_LEN] = { 0x00 };
- u8 noa_attr_content[32] = { 0x00 };
- u32 p2pielen = 0;
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
-
- memcpy(pwlanhdr->addr1, da, ETH_ALEN);
- memcpy(pwlanhdr->addr2, pwdinfo->interface_addr, ETH_ALEN);
- memcpy(pwlanhdr->addr3, pwdinfo->interface_addr, ETH_ALEN);
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_ACTION);
-
- pframe += sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
-
- /* Build P2P action frame header */
- pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&p2poui, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &oui_subtype, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &dialogToken, &pattrib->pktlen);
-
- /* Add P2P IE header */
- /* P2P OUI */
- p2pielen = 0;
- p2pie[p2pielen++] = 0x50;
- p2pie[p2pielen++] = 0x6F;
- p2pie[p2pielen++] = 0x9A;
- p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
-
- /* Add Status attribute in P2P IE */
- p2pielen += rtw_set_p2p_attr_content(&p2pie[p2pielen], P2P_ATTR_STATUS, 1, &status);
-
- /* Add NoA attribute in P2P IE */
- noa_attr_content[0] = 0x1;/* index */
- noa_attr_content[1] = 0x0;/* CTWindow and OppPS Parameters */
-
- /* todo: Notice of Absence Descriptor(s) */
-
- p2pielen += rtw_set_p2p_attr_content(&p2pie[p2pielen], P2P_ATTR_NOA, 2, noa_attr_content);
-
- pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, p2pie, &pattrib->pktlen);
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- dump_mgntframe(padapter, pmgntframe);
-}
-
-u32 build_beacon_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf)
-{
- u8 p2pie[MAX_P2P_IE_LEN] = { 0x00 };
- u16 capability = 0;
- u32 len = 0, p2pielen = 0;
- __le16 le_tmp;
-
- /* P2P OUI */
- p2pielen = 0;
- p2pie[p2pielen++] = 0x50;
- p2pie[p2pielen++] = 0x6F;
- p2pie[p2pielen++] = 0x9A;
- p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
-
- /* According to the P2P Specification, the beacon frame should contain 3 P2P attributes */
- /* 1. P2P Capability */
- /* 2. P2P Device ID */
- /* 3. Notice of Absence (NOA) */
-
- /* P2P Capability ATTR */
- /* Type: */
- /* Length: */
- /* Value: */
- /* Device Capability Bitmap, 1 byte */
- /* Be able to participate in additional P2P Groups and */
- /* support the P2P Invitation Procedure */
- /* Group Capability Bitmap, 1 byte */
- capability = P2P_DEVCAP_INVITATION_PROC | P2P_DEVCAP_CLIENT_DISCOVERABILITY;
- capability |= ((P2P_GRPCAP_GO | P2P_GRPCAP_INTRABSS) << 8);
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_PROVISIONING_ING))
- capability |= (P2P_GRPCAP_GROUP_FORMATION << 8);
-
- le_tmp = cpu_to_le16(capability);
- p2pielen += rtw_set_p2p_attr_content(&p2pie[p2pielen], P2P_ATTR_CAPABILITY, 2, (u8 *)&le_tmp);
-
- /* P2P Device ID ATTR */
- p2pielen += rtw_set_p2p_attr_content(&p2pie[p2pielen], P2P_ATTR_DEVICE_ID, ETH_ALEN, pwdinfo->device_addr);
-
- /* Notice of Absence ATTR */
- /* Type: */
- /* Length: */
- /* Value: */
-
- pbuf = rtw_set_ie(pbuf, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &len);
- return len;
-}
-
-u32 build_probe_resp_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf)
-{
- u8 p2pie[MAX_P2P_IE_LEN] = { 0x00 };
- u32 len = 0, p2pielen = 0;
-
- /* P2P OUI */
- p2pielen = 0;
- p2pie[p2pielen++] = 0x50;
- p2pie[p2pielen++] = 0x6F;
- p2pie[p2pielen++] = 0x9A;
- p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
-
- /* Commented by Albert 20100907 */
- /* According to the P2P Specification, the probe response frame should contain 5 P2P attributes */
- /* 1. P2P Capability */
- /* 2. Extended Listen Timing */
- /* 3. Notice of Absence (NOA) (Only GO needs this) */
- /* 4. Device Info */
- /* 5. Group Info (Only GO need this) */
-
- /* P2P Capability ATTR */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
-
- /* Length: */
- /* u16*) (p2pie + p2pielen) = cpu_to_le16(0x0002); */
- RTW_PUT_LE16(p2pie + p2pielen, 0x0002);
- p2pielen += 2;
-
- /* Value: */
- /* Device Capability Bitmap, 1 byte */
- p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
-
- /* Group Capability Bitmap, 1 byte */
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
- p2pie[p2pielen] = (P2P_GRPCAP_GO | P2P_GRPCAP_INTRABSS);
-
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_PROVISIONING_ING))
- p2pie[p2pielen] |= P2P_GRPCAP_GROUP_FORMATION;
-
- p2pielen++;
- } else if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE)) {
- /* Group Capability Bitmap, 1 byte */
- if (pwdinfo->persistent_supported)
- p2pie[p2pielen++] = P2P_GRPCAP_PERSISTENT_GROUP | DMP_P2P_GRPCAP_SUPPORT;
- else
- p2pie[p2pielen++] = DMP_P2P_GRPCAP_SUPPORT;
- }
-
- /* Extended Listen Timing ATTR */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_EX_LISTEN_TIMING;
-
- /* Length: */
- /* u16*) (p2pie + p2pielen) = cpu_to_le16(0x0004); */
- RTW_PUT_LE16(p2pie + p2pielen, 0x0004);
- p2pielen += 2;
-
- /* Value: */
- /* Availability Period */
- /* u16*) (p2pie + p2pielen) = cpu_to_le16(0xFFFF); */
- RTW_PUT_LE16(p2pie + p2pielen, 0xFFFF);
- p2pielen += 2;
-
- /* Availability Interval */
- /* u16*) (p2pie + p2pielen) = cpu_to_le16(0xFFFF); */
- RTW_PUT_LE16(p2pie + p2pielen, 0xFFFF);
- p2pielen += 2;
-
- /* Notice of Absence ATTR */
- /* Type: */
- /* Length: */
- /* Value: */
-
- /* Device Info ATTR */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
-
- /* Length: */
- /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) + Primary Device Type (8bytes) */
- /* + NumofSecondDevType (1byte) + WPS Device Name ID field (2bytes) + WPS Device Name Len field (2bytes) */
- /* u16*) (p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len); */
- RTW_PUT_LE16(p2pie + p2pielen, 21 + pwdinfo->device_name_len);
- p2pielen += 2;
-
- /* Value: */
- /* P2P Device Address */
- memcpy(p2pie + p2pielen, pwdinfo->device_addr, ETH_ALEN);
- p2pielen += ETH_ALEN;
-
- /* Config Method */
- /* This field should be big endian. Noted by P2P specification. */
- /* u16*) (p2pie + p2pielen) = cpu_to_be16(pwdinfo->supported_wps_cm); */
- RTW_PUT_BE16(p2pie + p2pielen, pwdinfo->supported_wps_cm);
- p2pielen += 2;
-
- /* Primary Device Type */
- /* Category ID */
- /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA); */
- RTW_PUT_BE16(p2pie + p2pielen, WPS_PDT_CID_MULIT_MEDIA);
- p2pielen += 2;
-
- /* OUI */
- /* u32*) (p2pie + p2pielen) = cpu_to_be32(WPSOUI); */
- RTW_PUT_BE32(p2pie + p2pielen, WPSOUI);
- p2pielen += 4;
-
- /* Sub Category ID */
- /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER); */
- RTW_PUT_BE16(p2pie + p2pielen, WPS_PDT_SCID_MEDIA_SERVER);
- p2pielen += 2;
-
- /* Number of Secondary Device Types */
- p2pie[p2pielen++] = 0x00; /* No Secondary Device Type List */
-
- /* Device Name */
- /* Type: */
- /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME); */
- RTW_PUT_BE16(p2pie + p2pielen, WPS_ATTR_DEVICE_NAME);
- p2pielen += 2;
-
- /* Length: */
- /* u16*) (p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len); */
- RTW_PUT_BE16(p2pie + p2pielen, pwdinfo->device_name_len);
- p2pielen += 2;
-
- /* Value: */
- memcpy(p2pie + p2pielen, pwdinfo->device_name, pwdinfo->device_name_len);
- p2pielen += pwdinfo->device_name_len;
-
- /* Group Info ATTR */
- /* Type: */
- /* Length: */
- /* Value: */
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
- p2pielen += go_add_group_info_attr(pwdinfo, p2pie + p2pielen);
-
- pbuf = rtw_set_ie(pbuf, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &len);
-
- return len;
-}
-
-u32 build_prov_disc_request_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf, u8 *pssid, u8 ussidlen, u8 *pdev_raddr)
-{
- u8 p2pie[MAX_P2P_IE_LEN] = { 0x00 };
- u32 len = 0, p2pielen = 0;
-
- /* P2P OUI */
- p2pielen = 0;
- p2pie[p2pielen++] = 0x50;
- p2pie[p2pielen++] = 0x6F;
- p2pie[p2pielen++] = 0x9A;
- p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
-
- /* Commented by Albert 20110301 */
- /* According to the P2P Specification, the provision discovery request frame should contain 3 P2P attributes */
- /* 1. P2P Capability */
- /* 2. Device Info */
- /* 3. Group ID (When joining an operating P2P Group) */
-
- /* P2P Capability ATTR */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
-
- /* Length: */
- /* u16*) (p2pie + p2pielen) = cpu_to_le16(0x0002); */
- RTW_PUT_LE16(p2pie + p2pielen, 0x0002);
- p2pielen += 2;
-
- /* Value: */
- /* Device Capability Bitmap, 1 byte */
- p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
-
- /* Group Capability Bitmap, 1 byte */
- if (pwdinfo->persistent_supported)
- p2pie[p2pielen++] = P2P_GRPCAP_PERSISTENT_GROUP | DMP_P2P_GRPCAP_SUPPORT;
- else
- p2pie[p2pielen++] = DMP_P2P_GRPCAP_SUPPORT;
-
- /* Device Info ATTR */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
-
- /* Length: */
- /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) + Primary Device Type (8bytes) */
- /* + NumofSecondDevType (1byte) + WPS Device Name ID field (2bytes) + WPS Device Name Len field (2bytes) */
- /* u16*) (p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len); */
- RTW_PUT_LE16(p2pie + p2pielen, 21 + pwdinfo->device_name_len);
- p2pielen += 2;
-
- /* Value: */
- /* P2P Device Address */
- memcpy(p2pie + p2pielen, pwdinfo->device_addr, ETH_ALEN);
- p2pielen += ETH_ALEN;
-
- /* Config Method */
- /* This field should be big endian. Noted by P2P specification. */
- if (pwdinfo->ui_got_wps_info == P2P_GOT_WPSINFO_PBC) {
- /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_CONFIG_METHOD_PBC); */
- RTW_PUT_BE16(p2pie + p2pielen, WPS_CONFIG_METHOD_PBC);
- } else {
- /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_CONFIG_METHOD_DISPLAY); */
- RTW_PUT_BE16(p2pie + p2pielen, WPS_CONFIG_METHOD_DISPLAY);
- }
-
- p2pielen += 2;
-
- /* Primary Device Type */
- /* Category ID */
- /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA); */
- RTW_PUT_BE16(p2pie + p2pielen, WPS_PDT_CID_MULIT_MEDIA);
- p2pielen += 2;
-
- /* OUI */
- /* u32*) (p2pie + p2pielen) = cpu_to_be32(WPSOUI); */
- RTW_PUT_BE32(p2pie + p2pielen, WPSOUI);
- p2pielen += 4;
-
- /* Sub Category ID */
- /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER); */
- RTW_PUT_BE16(p2pie + p2pielen, WPS_PDT_SCID_MEDIA_SERVER);
- p2pielen += 2;
-
- /* Number of Secondary Device Types */
- p2pie[p2pielen++] = 0x00; /* No Secondary Device Type List */
-
- /* Device Name */
- /* Type: */
- /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME); */
- RTW_PUT_BE16(p2pie + p2pielen, WPS_ATTR_DEVICE_NAME);
- p2pielen += 2;
-
- /* Length: */
- /* u16*) (p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len); */
- RTW_PUT_BE16(p2pie + p2pielen, pwdinfo->device_name_len);
- p2pielen += 2;
-
- /* Value: */
- memcpy(p2pie + p2pielen, pwdinfo->device_name, pwdinfo->device_name_len);
- p2pielen += pwdinfo->device_name_len;
-
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT)) {
- /* Added by Albert 2011/05/19 */
- /* In this case, the pdev_raddr is the device address of the group owner. */
-
- /* P2P Group ID ATTR */
- /* Type: */
- p2pie[p2pielen++] = P2P_ATTR_GROUP_ID;
-
- /* Length: */
- /* u16*) (p2pie + p2pielen) = cpu_to_le16(ETH_ALEN + ussidlen); */
- RTW_PUT_LE16(p2pie + p2pielen, ETH_ALEN + ussidlen);
- p2pielen += 2;
-
- /* Value: */
- memcpy(p2pie + p2pielen, pdev_raddr, ETH_ALEN);
- p2pielen += ETH_ALEN;
-
- memcpy(p2pie + p2pielen, pssid, ussidlen);
- p2pielen += ussidlen;
- }
-
- pbuf = rtw_set_ie(pbuf, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &len);
-
- return len;
-}
-
-u32 build_assoc_resp_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf, u8 status_code)
-{
- u8 p2pie[MAX_P2P_IE_LEN] = { 0x00 };
- u32 len = 0, p2pielen = 0;
-
- /* P2P OUI */
- p2pielen = 0;
- p2pie[p2pielen++] = 0x50;
- p2pie[p2pielen++] = 0x6F;
- p2pie[p2pielen++] = 0x9A;
- p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
-
- /* According to the P2P Specification, the Association response frame should contain 2 P2P attributes */
- /* 1. Status */
- /* 2. Extended Listen Timing (optional) */
-
- /* Status ATTR */
- p2pielen += rtw_set_p2p_attr_content(&p2pie[p2pielen], P2P_ATTR_STATUS, 1, &status_code);
-
- /* Extended Listen Timing ATTR */
- /* Type: */
- /* Length: */
- /* Value: */
-
- pbuf = rtw_set_ie(pbuf, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &len);
-
- return len;
-}
-
-u32 process_probe_req_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
-{
- u8 *p;
- u32 ret = false;
- u8 *p2pie;
- u32 p2pielen = 0;
- int ssid_len = 0, rate_cnt = 0;
-
- p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, _SUPPORTEDRATES_IE_, (int *)&rate_cnt,
- len - WLAN_HDR_A3_LEN - _PROBEREQ_IE_OFFSET_);
-
- if (rate_cnt <= 4) {
- int i, g_rate = 0;
-
- for (i = 0; i < rate_cnt; i++) {
- if (((*(p + 2 + i) & 0xff) != 0x02) &&
- ((*(p + 2 + i) & 0xff) != 0x04) &&
- ((*(p + 2 + i) & 0xff) != 0x0B) &&
- ((*(p + 2 + i) & 0xff) != 0x16))
- g_rate = 1;
- }
-
- if (g_rate == 0) {
-			/* There is no OFDM rate included in the SupportedRates IE of this probe request frame. */
-			/* The driver should respond to this probe request. */
- return ret;
- }
- } else {
-		/* rate_cnt > 4 means the SupportedRates IE contains an OFDM rate, because the count of CCK rates is 4. */
-		/* We should proceed with the following check for this probe request. */
- }
-
- /* Added comments by Albert 20100906 */
- /* There are several items we should check here. */
- /* 1. This probe request frame must contain the P2P IE. (Done) */
- /* 2. This probe request frame must contain the wildcard SSID. (Done) */
- /* 3. Wildcard BSSID. (Todo) */
- /* 4. Destination Address. (Done in mgt_dispatcher function) */
- /* 5. Requested Device Type in WSC IE. (Todo) */
- /* 6. Device ID attribute in P2P IE. (Todo) */
-
- p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, _SSID_IE_, (int *)&ssid_len,
- len - WLAN_HDR_A3_LEN - _PROBEREQ_IE_OFFSET_);
-
-	ssid_len &= 0xff; /* Only the last byte is valid for the ssid len of the probe request */
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE) || rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
- p2pie = rtw_get_p2p_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, len - WLAN_HDR_A3_LEN - _PROBEREQ_IE_OFFSET_, NULL, &p2pielen);
- if (p2pie) {
- if (p && !memcmp((void *)(p + 2), (void *)pwdinfo->p2p_wildcard_ssid, 7)) {
- /* todo: */
- /* Check Requested Device Type attributes in WSC IE. */
- /* Check Device ID attribute in P2P IE */
-
- ret = true;
- } else if (p && ssid_len == 0) {
- ret = true;
- }
- } else {
- /* non -p2p device */
- }
- }
-
- return ret;
-}
-
-u32 process_assoc_req_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pframe, uint len, struct sta_info *psta)
-{
- u8 status_code = P2P_STATUS_SUCCESS;
- u8 *pbuf, *pattr_content = NULL;
- u32 attr_contentlen = 0;
- u16 cap_attr = 0;
- unsigned short frame_type, ie_offset = 0;
- u8 *ies;
- u32 ies_len;
- u8 *p2p_ie;
- u32 p2p_ielen = 0;
- __be16 be_tmp;
- __le16 le_tmp;
-
- if (!rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
- return P2P_STATUS_FAIL_REQUEST_UNABLE;
-
- frame_type = GetFrameSubType(pframe);
- if (frame_type == WIFI_ASSOCREQ)
- ie_offset = _ASOCREQ_IE_OFFSET_;
- else /* WIFI_REASSOCREQ */
- ie_offset = _REASOCREQ_IE_OFFSET_;
-
- ies = pframe + WLAN_HDR_A3_LEN + ie_offset;
- ies_len = len - WLAN_HDR_A3_LEN - ie_offset;
-
- p2p_ie = rtw_get_p2p_ie(ies, ies_len, NULL, &p2p_ielen);
-
- if (!p2p_ie)
- status_code = P2P_STATUS_FAIL_INVALID_PARAM;
-
- while (p2p_ie) {
- /* Check P2P Capability ATTR */
- if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_CAPABILITY, (u8 *)&le_tmp, (uint *)&attr_contentlen)) {
- cap_attr = le16_to_cpu(le_tmp);
- psta->dev_cap = cap_attr & 0xff;
- }
-
- /* Check Extended Listen Timing ATTR */
-
- /* Check P2P Device Info ATTR */
- if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_DEVICE_INFO, NULL, (uint *)&attr_contentlen)) {
- pattr_content = kzalloc(attr_contentlen, GFP_KERNEL);
- pbuf = pattr_content;
- if (pattr_content) {
- u8 num_of_secdev_type;
- u16 dev_name_len;
-
- rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_DEVICE_INFO, pattr_content, (uint *)&attr_contentlen);
-
- memcpy(psta->dev_addr, pattr_content, ETH_ALEN);/* P2P Device Address */
-
- pattr_content += ETH_ALEN;
-
- memcpy(&be_tmp, pattr_content, 2);/* Config Methods */
- psta->config_methods = be16_to_cpu(be_tmp);
-
- pattr_content += 2;
-
- memcpy(psta->primary_dev_type, pattr_content, 8);
-
- pattr_content += 8;
-
- num_of_secdev_type = *pattr_content;
- pattr_content += 1;
-
- if (num_of_secdev_type == 0) {
- psta->num_of_secdev_type = 0;
- } else {
- u32 len;
-
- psta->num_of_secdev_type = num_of_secdev_type;
-
- len = (sizeof(psta->secdev_types_list) < (num_of_secdev_type * 8)) ?
- (sizeof(psta->secdev_types_list)) : (num_of_secdev_type * 8);
-
- memcpy(psta->secdev_types_list, pattr_content, len);
-
- pattr_content += (num_of_secdev_type * 8);
- }
-
- psta->dev_name_len = 0;
- if (be16_to_cpu(*(__be16 *)pattr_content) == WPS_ATTR_DEVICE_NAME) {
- dev_name_len = be16_to_cpu(*(__be16 *)(pattr_content + 2));
-
- psta->dev_name_len = (sizeof(psta->dev_name) < dev_name_len) ? sizeof(psta->dev_name) : dev_name_len;
-
- memcpy(psta->dev_name, pattr_content + 4, psta->dev_name_len);
- }
- kfree(pbuf);
- }
- }
-
- /* Get the next P2P IE */
- p2p_ie = rtw_get_p2p_ie(p2p_ie + p2p_ielen, ies_len - (p2p_ie - ies + p2p_ielen), NULL, &p2p_ielen);
- }
-
- return status_code;
-}
-
-u32 process_p2p_devdisc_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
-{
- u8 *frame_body;
- u8 status, dialogToken;
- struct sta_info *psta = NULL;
- struct adapter *padapter = pwdinfo->padapter;
- struct sta_priv *pstapriv = &padapter->stapriv;
- u8 *p2p_ie;
- u32 p2p_ielen = 0;
-
- frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
-
- dialogToken = frame_body[7];
- status = P2P_STATUS_FAIL_UNKNOWN_P2PGROUP;
-
- p2p_ie = rtw_get_p2p_ie(frame_body + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, NULL, &p2p_ielen);
- if (p2p_ie) {
- u8 groupid[38] = { 0x00 };
- u8 dev_addr[ETH_ALEN] = { 0x00 };
- u32 attr_contentlen = 0;
-
- if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, groupid, &attr_contentlen)) {
- if (!memcmp(pwdinfo->device_addr, groupid, ETH_ALEN) &&
- !memcmp(pwdinfo->p2p_group_ssid, groupid + ETH_ALEN, pwdinfo->p2p_group_ssid_len)) {
- attr_contentlen = 0;
- if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_DEVICE_ID, dev_addr, &attr_contentlen)) {
- struct list_head *phead, *plist;
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- phead = &pstapriv->asoc_list;
- plist = phead->next;
-
- /* look up sta asoc_queue */
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, asoc_list);
-
- plist = plist->next;
-
- if (psta->is_p2p_device && (psta->dev_cap & P2P_DEVCAP_CLIENT_DISCOVERABILITY) &&
- !memcmp(psta->dev_addr, dev_addr, ETH_ALEN)) {
- /* issue GO Discoverability Request */
- issue_group_disc_req(pwdinfo, psta->hwaddr);
- status = P2P_STATUS_SUCCESS;
- break;
- } else {
- status = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
- }
- }
- spin_unlock_bh(&pstapriv->asoc_list_lock);
- } else {
- status = P2P_STATUS_FAIL_INVALID_PARAM;
- }
- } else {
- status = P2P_STATUS_FAIL_INVALID_PARAM;
- }
- }
- }
-
- /* issue Device Discoverability Response */
- issue_p2p_devdisc_resp(pwdinfo, GetAddr2Ptr(pframe), status, dialogToken);
-
- return status == P2P_STATUS_SUCCESS;
-}
-
-u32 process_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
-{
- return true;
-}
-
-u8 process_p2p_provdisc_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
-{
- u8 *frame_body;
- u8 *wpsie;
- uint wps_ielen = 0, attr_contentlen = 0;
- u16 uconfig_method = 0;
- __be16 be_tmp;
-
- frame_body = (pframe + sizeof(struct ieee80211_hdr_3addr));
-
- wpsie = rtw_get_wps_ie(frame_body + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, NULL, &wps_ielen);
- if (wpsie) {
- if (rtw_get_wps_attr_content(wpsie, wps_ielen, WPS_ATTR_CONF_METHOD, (u8 *)&be_tmp, &attr_contentlen)) {
- uconfig_method = be16_to_cpu(be_tmp);
- switch (uconfig_method) {
- case WPS_CM_DISPLYA:
- memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "dis", 3);
- break;
- case WPS_CM_LABEL:
- memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "lab", 3);
- break;
- case WPS_CM_PUSH_BUTTON:
- memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "pbc", 3);
- break;
- case WPS_CM_KEYPAD:
- memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "pad", 3);
- break;
- }
- issue_p2p_provision_resp(pwdinfo, GetAddr2Ptr(pframe), frame_body, uconfig_method);
- }
- }
- return true;
-}
-
-u8 process_p2p_provdisc_resp(struct wifidirect_info *pwdinfo, u8 *pframe)
-{
- return true;
-}
-
-static u8 rtw_p2p_get_peer_ch_list(struct wifidirect_info *pwdinfo, u8 *ch_content, u8 ch_cnt, u8 *peer_ch_list)
-{
- u8 i = 0, j = 0;
- u8 temp = 0;
- u8 ch_no = 0;
- ch_content += 3;
- ch_cnt -= 3;
-
- while (ch_cnt > 0) {
- ch_content += 1;
- ch_cnt -= 1;
- temp = *ch_content;
- for (i = 0 ; i < temp ; i++, j++)
- peer_ch_list[j] = *(ch_content + 1 + i);
- ch_content += (temp + 1);
- ch_cnt -= (temp + 1);
- ch_no += temp;
- }
-
- return ch_no;
-}
-
-static u8 rtw_p2p_ch_inclusion(struct mlme_ext_priv *pmlmeext, u8 *peer_ch_list, u8 peer_ch_num, u8 *ch_list_inclusioned)
-{
- int i = 0, j = 0, temp = 0;
- u8 ch_no = 0;
-
- for (i = 0; i < peer_ch_num; i++) {
- for (j = temp; j < pmlmeext->max_chan_nums; j++) {
- if (*(peer_ch_list + i) == pmlmeext->channel_set[j].ChannelNum) {
- ch_list_inclusioned[ch_no++] = *(peer_ch_list + i);
- temp = j;
- break;
- }
- }
- }
-
- return ch_no;
-}
-
-u8 process_p2p_group_negotation_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
-{
- struct adapter *padapter = pwdinfo->padapter;
- u8 result = P2P_STATUS_SUCCESS;
- u32 p2p_ielen = 0, wps_ielen = 0;
- u8 *ies;
- u32 ies_len;
- u8 *p2p_ie;
- u8 *wpsie;
- u16 wps_devicepassword_id = 0x0000;
- uint wps_devicepassword_id_len = 0;
- __be16 be_tmp;
-
- wpsie = rtw_get_wps_ie(pframe + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, NULL, &wps_ielen);
- if (wpsie) {
- /* Commented by Kurt 20120113 */
- /* If some device wants to do p2p handshake without sending prov_disc_req */
- /* We have to get peer_req_cm from here. */
- if (!memcmp(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "000", 3)) {
- rtw_get_wps_attr_content(wpsie, wps_ielen, WPS_ATTR_DEVICE_PWID, (u8 *)&be_tmp, &wps_devicepassword_id_len);
- wps_devicepassword_id = be16_to_cpu(be_tmp);
-
- if (wps_devicepassword_id == WPS_DPID_USER_SPEC)
- memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "dis", 3);
- else if (wps_devicepassword_id == WPS_DPID_REGISTRAR_SPEC)
- memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "pad", 3);
- else
- memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "pbc", 3);
- }
- } else {
- result = P2P_STATUS_FAIL_INCOMPATIBLE_PARAM;
- rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
- return result;
- }
-
- if (pwdinfo->ui_got_wps_info == P2P_NO_WPSINFO) {
- result = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
- rtw_p2p_set_state(pwdinfo, P2P_STATE_TX_INFOR_NOREADY);
- return result;
- }
-
- ies = pframe + _PUBLIC_ACTION_IE_OFFSET_;
- ies_len = len - _PUBLIC_ACTION_IE_OFFSET_;
-
- p2p_ie = rtw_get_p2p_ie(ies, ies_len, NULL, &p2p_ielen);
-
- if (!p2p_ie) {
- result = P2P_STATUS_FAIL_INCOMPATIBLE_PARAM;
- rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
- }
-
- while (p2p_ie) {
- u8 attr_content = 0x00;
- u32 attr_contentlen = 0;
- u8 ch_content[50] = { 0x00 };
- uint ch_cnt = 0;
- u8 peer_ch_list[50] = { 0x00 };
- u8 peer_ch_num = 0;
- u8 ch_list_inclusioned[50] = { 0x00 };
- u8 ch_num_inclusioned = 0;
-
- rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_ING);
-
- if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GO_INTENT, &attr_content, &attr_contentlen)) {
- pwdinfo->peer_intent = attr_content; /* include both intent and tie breaker values. */
-
- if (pwdinfo->intent == (pwdinfo->peer_intent >> 1)) {
- /* Try to match the tie breaker value */
- if (pwdinfo->intent == P2P_MAX_INTENT) {
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
- result = P2P_STATUS_FAIL_BOTH_GOINTENT_15;
- } else {
- if (attr_content & 0x01)
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
- else
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
- }
- } else if (pwdinfo->intent > (pwdinfo->peer_intent >> 1)) {
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
- } else {
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
- }
-
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
- /* Store the group id information. */
- memcpy(pwdinfo->groupid_info.go_device_addr, pwdinfo->device_addr, ETH_ALEN);
- memcpy(pwdinfo->groupid_info.ssid, pwdinfo->nego_ssid, pwdinfo->nego_ssidlen);
- }
- }
-
- attr_contentlen = 0;
- if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_INTENTED_IF_ADDR, pwdinfo->p2p_peer_interface_addr, &attr_contentlen)) {
- if (attr_contentlen != ETH_ALEN)
- memset(pwdinfo->p2p_peer_interface_addr, 0x00, ETH_ALEN);
- }
-
- if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_CH_LIST, ch_content, &ch_cnt)) {
- peer_ch_num = rtw_p2p_get_peer_ch_list(pwdinfo, ch_content, ch_cnt, peer_ch_list);
- ch_num_inclusioned = rtw_p2p_ch_inclusion(&padapter->mlmeextpriv, peer_ch_list, peer_ch_num, ch_list_inclusioned);
-
- if (ch_num_inclusioned == 0) {
- result = P2P_STATUS_FAIL_NO_COMMON_CH;
- rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
- break;
- }
-
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
- if (!rtw_p2p_is_channel_list_ok(pwdinfo->operating_channel,
- ch_list_inclusioned, ch_num_inclusioned)) {
- u8 operatingch_info[5] = { 0x00 }, peer_operating_ch = 0;
- attr_contentlen = 0;
-
- if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_OPERATING_CH, operatingch_info, &attr_contentlen))
- peer_operating_ch = operatingch_info[4];
-
- if (rtw_p2p_is_channel_list_ok(peer_operating_ch,
- ch_list_inclusioned,
- ch_num_inclusioned))
- /**
-						 * Change our operating channel to the peer's for compatibility.
- */
- pwdinfo->operating_channel = peer_operating_ch;
- else
- /* Take first channel of ch_list_inclusioned as operating channel */
- pwdinfo->operating_channel = ch_list_inclusioned[0];
- }
- }
- }
-
- /* Get the next P2P IE */
- p2p_ie = rtw_get_p2p_ie(p2p_ie + p2p_ielen, ies_len - (p2p_ie - ies + p2p_ielen), NULL, &p2p_ielen);
- }
- return result;
-}
-
-u8 process_p2p_group_negotation_resp(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
-{
- struct adapter *padapter = pwdinfo->padapter;
- u8 result = P2P_STATUS_SUCCESS;
- u32 p2p_ielen, wps_ielen;
- u8 *ies;
- u32 ies_len;
- u8 *p2p_ie;
-
- ies = pframe + _PUBLIC_ACTION_IE_OFFSET_;
- ies_len = len - _PUBLIC_ACTION_IE_OFFSET_;
-
-	/* Determine which one is the P2P GO and which one is the P2P client. */
-
- if (!rtw_get_wps_ie(ies, ies_len, NULL, &wps_ielen)) {
- result = P2P_STATUS_FAIL_INCOMPATIBLE_PARAM;
- rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
- }
-
- p2p_ie = rtw_get_p2p_ie(ies, ies_len, NULL, &p2p_ielen);
- if (!p2p_ie) {
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
- rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
- result = P2P_STATUS_FAIL_INCOMPATIBLE_PARAM;
- } else {
- u8 attr_content = 0x00;
- u32 attr_contentlen = 0;
- u8 operatingch_info[5] = { 0x00 };
- u8 groupid[38];
- u8 peer_ch_list[50] = { 0x00 };
- u8 peer_ch_num = 0;
- u8 ch_list_inclusioned[50] = { 0x00 };
- u8 ch_num_inclusioned = 0;
-
- while (p2p_ie) { /* Found the P2P IE. */
- rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_STATUS, &attr_content, &attr_contentlen);
- if (attr_contentlen == 1) {
- if (attr_content == P2P_STATUS_SUCCESS) {
- /* Do nothing. */
- } else {
- if (attr_content == P2P_STATUS_FAIL_INFO_UNAVAILABLE) {
- rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INFOR_NOREADY);
- } else {
- rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
- }
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
- result = attr_content;
- break;
- }
- }
-
- /* Try to get the peer's interface address */
- attr_contentlen = 0;
- if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_INTENTED_IF_ADDR, pwdinfo->p2p_peer_interface_addr, &attr_contentlen)) {
- if (attr_contentlen != ETH_ALEN)
- memset(pwdinfo->p2p_peer_interface_addr, 0x00, ETH_ALEN);
- }
-
- /* Try to get the peer's intent and tie breaker value. */
- attr_content = 0x00;
- attr_contentlen = 0;
- if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GO_INTENT, &attr_content, &attr_contentlen)) {
- pwdinfo->peer_intent = attr_content; /* include both intent and tie breaker values. */
-
- if (pwdinfo->intent == (pwdinfo->peer_intent >> 1)) {
- /* Try to match the tie breaker value */
- if (pwdinfo->intent == P2P_MAX_INTENT) {
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
- result = P2P_STATUS_FAIL_BOTH_GOINTENT_15;
- rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
- } else {
- rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
- rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
- if (attr_content & 0x01)
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
- else
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
- }
- } else if (pwdinfo->intent > (pwdinfo->peer_intent >> 1)) {
- rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
- rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
- } else {
- rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
- rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
- }
-
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
- /* Store the group id information. */
- memcpy(pwdinfo->groupid_info.go_device_addr, pwdinfo->device_addr, ETH_ALEN);
- memcpy(pwdinfo->groupid_info.ssid, pwdinfo->nego_ssid, pwdinfo->nego_ssidlen);
- }
- }
-
- /* Try to get the operation channel information */
-
- attr_contentlen = 0;
- if (rtw_get_p2p_attr_content(p2p_ie,
- p2p_ielen,
- P2P_ATTR_OPERATING_CH,
- operatingch_info,
- &attr_contentlen))
- pwdinfo->peer_operating_ch = operatingch_info[4];
-
- /* Try to get the channel list information */
- if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_CH_LIST, pwdinfo->channel_list_attr, &pwdinfo->channel_list_attr_len)) {
-
- peer_ch_num = rtw_p2p_get_peer_ch_list(pwdinfo, pwdinfo->channel_list_attr, pwdinfo->channel_list_attr_len, peer_ch_list);
- ch_num_inclusioned = rtw_p2p_ch_inclusion(&padapter->mlmeextpriv, peer_ch_list, peer_ch_num, ch_list_inclusioned);
-
- if (ch_num_inclusioned == 0) {
- result = P2P_STATUS_FAIL_NO_COMMON_CH;
- rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
- break;
- }
-
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
- if (!rtw_p2p_is_channel_list_ok(pwdinfo->operating_channel,
- ch_list_inclusioned, ch_num_inclusioned)) {
- u8 operatingch_info[5] = { 0x00 }, peer_operating_ch = 0;
- attr_contentlen = 0;
-
- if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_OPERATING_CH, operatingch_info, &attr_contentlen))
- peer_operating_ch = operatingch_info[4];
-
- if (rtw_p2p_is_channel_list_ok(peer_operating_ch,
- ch_list_inclusioned, ch_num_inclusioned))
- /**
-							 * Change our operating channel to the peer's for compatibility.
- */
- pwdinfo->operating_channel = peer_operating_ch;
- else
- /* Take first channel of ch_list_inclusioned as operating channel */
- pwdinfo->operating_channel = ch_list_inclusioned[0];
- }
- }
- }
-
- /* Try to get the group id information if peer is GO */
- attr_contentlen = 0;
- memset(groupid, 0x00, 38);
- if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, groupid, &attr_contentlen)) {
- memcpy(pwdinfo->groupid_info.go_device_addr, &groupid[0], ETH_ALEN);
- memcpy(pwdinfo->groupid_info.ssid, &groupid[6], attr_contentlen - ETH_ALEN);
- }
-
- /* Get the next P2P IE */
- p2p_ie = rtw_get_p2p_ie(p2p_ie + p2p_ielen, ies_len - (p2p_ie - ies + p2p_ielen), NULL, &p2p_ielen);
- }
- }
- return result;
-}
-
-u8 process_p2p_group_negotation_confirm(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
-{
- u8 *ies;
- u32 ies_len;
- u8 *p2p_ie;
- u32 p2p_ielen = 0;
- u8 result = P2P_STATUS_SUCCESS;
- ies = pframe + _PUBLIC_ACTION_IE_OFFSET_;
- ies_len = len - _PUBLIC_ACTION_IE_OFFSET_;
-
- p2p_ie = rtw_get_p2p_ie(ies, ies_len, NULL, &p2p_ielen);
- while (p2p_ie) { /* Found the P2P IE. */
- u8 attr_content = 0x00, operatingch_info[5] = { 0x00 };
- u8 groupid[38] = { 0x00 };
- u32 attr_contentlen = 0;
-
- pwdinfo->negotiation_dialog_token = 1;
- rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_STATUS, &attr_content, &attr_contentlen);
- if (attr_contentlen == 1) {
- result = attr_content;
-
- if (attr_content == P2P_STATUS_SUCCESS) {
- del_timer_sync(&pwdinfo->restore_p2p_state_timer);
-
- /* Commented by Albert 20100911 */
-				/* Todo: Need to handle the case in which both Intents are the same. */
- rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
- rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
- if ((pwdinfo->intent) > (pwdinfo->peer_intent >> 1)) {
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
- } else if ((pwdinfo->intent) < (pwdinfo->peer_intent >> 1)) {
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
- } else {
- /* Have to compare the Tie Breaker */
- if (pwdinfo->peer_intent & 0x01)
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
- else
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
- }
- } else {
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
- rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
- break;
- }
- }
-
- /* Try to get the group id information */
- attr_contentlen = 0;
- memset(groupid, 0x00, 38);
- if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, groupid, &attr_contentlen)) {
- memcpy(pwdinfo->groupid_info.go_device_addr, &groupid[0], ETH_ALEN);
- memcpy(pwdinfo->groupid_info.ssid, &groupid[6], attr_contentlen - ETH_ALEN);
- }
-
- attr_contentlen = 0;
- if (rtw_get_p2p_attr_content(p2p_ie,
- p2p_ielen,
- P2P_ATTR_OPERATING_CH,
- operatingch_info,
- &attr_contentlen))
- pwdinfo->peer_operating_ch = operatingch_info[4];
-
- /* Get the next P2P IE */
- p2p_ie = rtw_get_p2p_ie(p2p_ie + p2p_ielen, ies_len - (p2p_ie - ies + p2p_ielen), NULL, &p2p_ielen);
- }
- return result;
-}
-
-u8 process_p2p_presence_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
-{
- u8 *frame_body;
- u8 dialogToken = 0;
- u8 status = P2P_STATUS_SUCCESS;
-
- frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
-
- dialogToken = frame_body[6];
-
- /* todo: check NoA attribute */
-
- issue_p2p_presence_resp(pwdinfo, GetAddr2Ptr(pframe), status, dialogToken);
-
- return true;
-}
-
-static void find_phase_handler(struct adapter *padapter)
-{
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct ndis_802_11_ssid ssid;
-
- memset((unsigned char *)&ssid, 0, sizeof(struct ndis_802_11_ssid));
- memcpy(ssid.Ssid, pwdinfo->p2p_wildcard_ssid, P2P_WILDCARD_SSID_LEN);
- ssid.SsidLength = P2P_WILDCARD_SSID_LEN;
-
- rtw_p2p_set_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH);
-
- spin_lock_bh(&pmlmepriv->lock);
- spin_unlock_bh(&pmlmepriv->lock);
-
-}
-
-void p2p_concurrent_handler(struct adapter *padapter);
-
-static void restore_p2p_state_handler(struct adapter *padapter)
-{
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_FAIL))
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
- rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
-
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE)) {
-		/* Only the device role switches back to its listen channel; */
-		/* a P2P client must stay on the operating channel of the P2P GO. */
- set_channel_bwmode(padapter, pwdinfo->listen_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
- }
-
-}
-
-static void pre_tx_invitereq_handler(struct adapter *padapter)
-{
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
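-	/* Switch to the peer's channel, send a P2P probe request and re-arm the pre-TX scan timer. */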
- set_channel_bwmode(padapter, pwdinfo->invitereq_info.peer_ch, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
- rtw_mlme_under_site_survey(padapter);
- issue_probereq_p2p(padapter);
- _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
-
-}
-
-static void pre_tx_provdisc_handler(struct adapter *padapter)
-{
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- set_channel_bwmode(padapter, pwdinfo->tx_prov_disc_info.peer_channel_num[0], HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
- rtw_mlme_under_site_survey(padapter);
- issue_probereq_p2p(padapter);
- _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
-
-}
-
-static void pre_tx_negoreq_handler(struct adapter *padapter)
-{
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- set_channel_bwmode(padapter, pwdinfo->nego_req_info.peer_channel_num[0], HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
- rtw_mlme_under_site_survey(padapter);
- issue_probereq_p2p(padapter);
- _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
-
-}
-
-void p2p_protocol_wk_hdl(struct adapter *padapter, int intCmdType)
-{
-
- switch (intCmdType) {
- case P2P_FIND_PHASE_WK:
- find_phase_handler(padapter);
- break;
- case P2P_RESTORE_STATE_WK:
- restore_p2p_state_handler(padapter);
- break;
- case P2P_PRE_TX_PROVDISC_PROCESS_WK:
- pre_tx_provdisc_handler(padapter);
- break;
- case P2P_PRE_TX_INVITEREQ_PROCESS_WK:
- pre_tx_invitereq_handler(padapter);
- break;
- case P2P_PRE_TX_NEGOREQ_PROCESS_WK:
- pre_tx_negoreq_handler(padapter);
- break;
- }
-
-}
-
-void process_p2p_ps_ie(struct adapter *padapter, u8 *IEs, u32 IELength)
-{
- u8 *p2p_ie;
- u32 p2p_ielen = 0;
- u8 noa_attr[MAX_P2P_IE_LEN] = { 0x00 };/* NoA length should be n*(13) + 2 */
- u32 attr_contentlen = 0;
-
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- u8 find_p2p = false, find_p2p_ps = false;
- u8 noa_offset, noa_num, noa_index;
-
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
- return;
-
- p2p_ie = rtw_get_p2p_ie(IEs, IELength, NULL, &p2p_ielen);
-
- while (p2p_ie) {
- find_p2p = true;
- /* Get Notice of Absence IE. */
- if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_NOA, noa_attr, &attr_contentlen)) {
- find_p2p_ps = true;
- noa_index = noa_attr[0];
-
- if ((pwdinfo->p2p_ps_mode == P2P_PS_NONE) ||
-			    (noa_index != pwdinfo->noa_index)) { /* if the index changes, the driver should reconfigure the related settings. */
- pwdinfo->noa_index = noa_index;
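-				/* The CTWindow/OppPS parameter octet carries OppPS in bit 7 and CTWindow in bits 0-6. */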
- pwdinfo->opp_ps = noa_attr[1] >> 7;
- pwdinfo->ctwindow = noa_attr[1] & 0x7F;
-
- noa_offset = 2;
- noa_num = 0;
- /* NoA length should be n*(13) + 2 */
- if (attr_contentlen > 2) {
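-					/* Each NoA descriptor is 13 bytes: 1-byte count, 4-byte duration, 4-byte interval, 4-byte start time. */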
- while (noa_offset < attr_contentlen) {
- /* memcpy(&wifidirect_info->noa_count[noa_num], &noa_attr[noa_offset], 1); */
- pwdinfo->noa_count[noa_num] = noa_attr[noa_offset];
- noa_offset += 1;
-
- memcpy(&pwdinfo->noa_duration[noa_num], &noa_attr[noa_offset], 4);
- noa_offset += 4;
-
- memcpy(&pwdinfo->noa_interval[noa_num], &noa_attr[noa_offset], 4);
- noa_offset += 4;
-
- memcpy(&pwdinfo->noa_start_time[noa_num], &noa_attr[noa_offset], 4);
- noa_offset += 4;
-
- noa_num++;
- }
- }
- pwdinfo->noa_num = noa_num;
-
- if (pwdinfo->opp_ps == 1) {
- pwdinfo->p2p_ps_mode = P2P_PS_CTWINDOW;
-					/* the driver should wait for LPS before entering CTWindow */
- if (padapter->pwrctrlpriv.bFwCurrentInPSMode)
- p2p_ps_wk_cmd(padapter, P2P_PS_ENABLE, 1);
- } else if (pwdinfo->noa_num > 0) {
- pwdinfo->p2p_ps_mode = P2P_PS_NOA;
- p2p_ps_wk_cmd(padapter, P2P_PS_ENABLE, 1);
- } else if (pwdinfo->p2p_ps_mode > P2P_PS_NONE) {
- p2p_ps_wk_cmd(padapter, P2P_PS_DISABLE, 1);
- }
- }
-
- break; /* find target, just break. */
- }
-
- /* Get the next P2P IE */
- p2p_ie = rtw_get_p2p_ie(p2p_ie + p2p_ielen, IELength - (p2p_ie - IEs + p2p_ielen), NULL, &p2p_ielen);
- }
-
- if (find_p2p) {
- if ((pwdinfo->p2p_ps_mode > P2P_PS_NONE) && !find_p2p_ps)
- p2p_ps_wk_cmd(padapter, P2P_PS_DISABLE, 1);
- }
-
-}
-
-void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
-{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- /* Pre action for p2p state */
- switch (p2p_ps_state) {
- case P2P_PS_DISABLE:
- pwdinfo->p2p_ps_state = p2p_ps_state;
-
- rtl8188e_set_p2p_ps_offload_cmd(padapter, p2p_ps_state);
-
- pwdinfo->noa_index = 0;
- pwdinfo->ctwindow = 0;
- pwdinfo->opp_ps = 0;
- pwdinfo->noa_num = 0;
- pwdinfo->p2p_ps_mode = P2P_PS_NONE;
- if (padapter->pwrctrlpriv.bFwCurrentInPSMode) {
- if (pwrpriv->smart_ps == 0) {
- pwrpriv->smart_ps = 2;
- rtw_set_firmware_ps_mode(padapter, pwrpriv->pwr_mode);
- }
- }
- break;
- case P2P_PS_ENABLE:
- if (pwdinfo->p2p_ps_mode > P2P_PS_NONE) {
- pwdinfo->p2p_ps_state = p2p_ps_state;
-
- if (pwdinfo->ctwindow > 0) {
- if (pwrpriv->smart_ps != 0) {
- pwrpriv->smart_ps = 0;
- rtw_set_firmware_ps_mode(padapter, pwrpriv->pwr_mode);
- }
- }
- rtl8188e_set_p2p_ps_offload_cmd(padapter, p2p_ps_state);
- }
- break;
- case P2P_PS_SCAN:
- case P2P_PS_SCAN_DONE:
- case P2P_PS_ALLSTASLEEP:
- if (pwdinfo->p2p_ps_mode > P2P_PS_NONE) {
- pwdinfo->p2p_ps_state = p2p_ps_state;
- rtl8188e_set_p2p_ps_offload_cmd(padapter, p2p_ps_state);
- }
- break;
- default:
- break;
- }
-
-}
-
-u8 p2p_ps_wk_cmd(struct adapter *padapter, u8 p2p_ps_state, u8 enqueue)
-{
- struct cmd_obj *ph2c;
- struct drvextra_cmd_parm *pdrvextra_cmd_parm;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- u8 res = _SUCCESS;
-
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
- return res;
-
- if (enqueue) {
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
- if (!pdrvextra_cmd_parm) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm->ec_id = P2P_PS_WK_CID;
- pdrvextra_cmd_parm->type_size = p2p_ps_state;
- pdrvextra_cmd_parm->pbuf = NULL;
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
-
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
- } else {
- p2p_ps_wk_hdl(padapter, p2p_ps_state);
- }
-
-exit:
-
- return res;
-}
-
-static void reset_ch_sitesurvey_timer_process(struct timer_list *t)
-{
-	struct adapter *adapter = from_timer(adapter, t, wdinfo.reset_ch_sitesurvey);
- struct wifidirect_info *pwdinfo = &adapter->wdinfo;
-
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
- return;
-
- /* Reset the operation channel information */
- pwdinfo->rx_invitereq_info.operation_ch[0] = 0;
- pwdinfo->rx_invitereq_info.scan_op_ch_only = 0;
-}
-
-static void reset_ch_sitesurvey_timer_process2(struct timer_list *t)
-{
-	struct adapter *adapter = from_timer(adapter, t, wdinfo.reset_ch_sitesurvey2);
- struct wifidirect_info *pwdinfo = &adapter->wdinfo;
-
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
- return;
-
- /* Reset the operation channel information */
- pwdinfo->p2p_info.operation_ch[0] = 0;
- pwdinfo->p2p_info.scan_op_ch_only = 0;
-}
-
-static void restore_p2p_state_timer_process(struct timer_list *t)
-{
- struct adapter *adapter = from_timer(adapter, t, wdinfo.restore_p2p_state_timer);
- struct wifidirect_info *pwdinfo = &adapter->wdinfo;
-
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
- return;
-
- p2p_protocol_wk_cmd(adapter, P2P_RESTORE_STATE_WK);
-}
-
-static void pre_tx_scan_timer_process(struct timer_list *t)
-{
- struct adapter *adapter = from_timer(adapter, t, wdinfo.pre_tx_scan_timer);
- struct wifidirect_info *pwdinfo = &adapter->wdinfo;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
-
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
- return;
-
- spin_lock_bh(&pmlmepriv->lock);
-
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ)) {
-		if (pwdinfo->tx_prov_disc_info.benable) {	/* whether the provision discovery request frame is triggered to be sent */
- p2p_protocol_wk_cmd(adapter, P2P_PRE_TX_PROVDISC_PROCESS_WK);
- /* issue_probereq_p2p(adapter); */
- /* _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT); */
- }
- } else if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING)) {
- if (pwdinfo->nego_req_info.benable)
- p2p_protocol_wk_cmd(adapter, P2P_PRE_TX_NEGOREQ_PROCESS_WK);
- } else if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_INVITE_REQ)) {
- if (pwdinfo->invitereq_info.benable)
- p2p_protocol_wk_cmd(adapter, P2P_PRE_TX_INVITEREQ_PROCESS_WK);
- }
-
- spin_unlock_bh(&pmlmepriv->lock);
-}
-
-static void find_phase_timer_process(struct timer_list *t)
-{
- struct adapter *adapter = from_timer(adapter, t, wdinfo.find_phase_timer);
- struct wifidirect_info *pwdinfo = &adapter->wdinfo;
-
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
- return;
-
- adapter->wdinfo.find_phase_state_exchange_cnt++;
-
- p2p_protocol_wk_cmd(adapter, P2P_FIND_PHASE_WK);
-}
-
-void reset_global_wifidirect_info(struct adapter *padapter)
-{
- struct wifidirect_info *pwdinfo;
-
- pwdinfo = &padapter->wdinfo;
- pwdinfo->persistent_supported = 0;
- pwdinfo->session_available = true;
-}
-
-void rtw_init_wifidirect_timers(struct adapter *padapter)
-{
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- timer_setup(&pwdinfo->find_phase_timer, find_phase_timer_process, 0);
- timer_setup(&pwdinfo->restore_p2p_state_timer, restore_p2p_state_timer_process, 0);
- timer_setup(&pwdinfo->pre_tx_scan_timer, pre_tx_scan_timer_process, 0);
- timer_setup(&pwdinfo->reset_ch_sitesurvey, reset_ch_sitesurvey_timer_process, 0);
- timer_setup(&pwdinfo->reset_ch_sitesurvey2, reset_ch_sitesurvey_timer_process2, 0);
-}
-
-void rtw_init_wifidirect_addrs(struct adapter *padapter, u8 *dev_addr, u8 *iface_addr)
-{
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
-	/* init device & interface addresses */
- if (dev_addr)
- memcpy(pwdinfo->device_addr, dev_addr, ETH_ALEN);
- if (iface_addr)
- memcpy(pwdinfo->interface_addr, iface_addr, ETH_ALEN);
-}
-
-void init_wifidirect_info(struct adapter *padapter, enum P2P_ROLE role)
-{
- struct wifidirect_info *pwdinfo;
-
- pwdinfo = &padapter->wdinfo;
- pwdinfo->padapter = padapter;
-
-	/* 1, 6 and 11 are the social channels defined in the Wi-Fi Direct specification. */
- pwdinfo->social_chan[0] = 1;
- pwdinfo->social_chan[1] = 6;
- pwdinfo->social_chan[2] = 11;
-	pwdinfo->social_chan[3] = 0;	/* channel 0 marks the end of scanning in the site survey function. */
-
- /* Use the channel 11 as the listen channel */
- pwdinfo->listen_channel = 11;
-
- if (role == P2P_ROLE_DEVICE) {
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
- rtw_p2p_set_state(pwdinfo, P2P_STATE_LISTEN);
- pwdinfo->intent = 1;
- rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_LISTEN);
- } else if (role == P2P_ROLE_CLIENT) {
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
- rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
- pwdinfo->intent = 1;
- rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
- } else if (role == P2P_ROLE_GO) {
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
- rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
- pwdinfo->intent = 15;
- rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
- }
-
-/* Use the OFDM rate in the P2P probe response frame. (6(B), 9(B), 12, 18, 24, 36, 48, 54) */
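-	/* Rates are in 500 kbps units; bit 7 marks a basic (B) rate. */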
- pwdinfo->support_rate[0] = 0x8c; /* 6(B) */
- pwdinfo->support_rate[1] = 0x92; /* 9(B) */
- pwdinfo->support_rate[2] = 0x18; /* 12 */
- pwdinfo->support_rate[3] = 0x24; /* 18 */
- pwdinfo->support_rate[4] = 0x30; /* 24 */
- pwdinfo->support_rate[5] = 0x48; /* 36 */
- pwdinfo->support_rate[6] = 0x60; /* 48 */
- pwdinfo->support_rate[7] = 0x6c; /* 54 */
-
- memcpy(pwdinfo->p2p_wildcard_ssid, "DIRECT-", 7);
-
- memset(pwdinfo->device_name, 0x00, WPS_MAX_DEVICE_NAME_LEN);
- pwdinfo->device_name_len = 0;
-
- memset(&pwdinfo->invitereq_info, 0x00, sizeof(struct tx_invite_req_info));
- pwdinfo->invitereq_info.token = 3; /* Token used for P2P invitation request frame. */
-
- memset(&pwdinfo->inviteresp_info, 0x00, sizeof(struct tx_invite_resp_info));
- pwdinfo->inviteresp_info.token = 0;
-
- pwdinfo->profileindex = 0;
- memset(&pwdinfo->profileinfo[0], 0x00, sizeof(struct profile_info) * P2P_MAX_PERSISTENT_GROUP_NUM);
-
- rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_NONE);
-
- pwdinfo->listen_dwell = (u8)((jiffies % 3) + 1);
-
- memset(&pwdinfo->tx_prov_disc_info, 0x00, sizeof(struct tx_provdisc_req_info));
- pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_NONE;
-
- memset(&pwdinfo->nego_req_info, 0x00, sizeof(struct tx_nego_req_info));
-
- pwdinfo->device_password_id_for_nego = WPS_DPID_PBC;
- pwdinfo->negotiation_dialog_token = 1;
-
- memset(pwdinfo->nego_ssid, 0x00, WLAN_SSID_MAXLEN);
- pwdinfo->nego_ssidlen = 0;
-
- pwdinfo->ui_got_wps_info = P2P_NO_WPSINFO;
- pwdinfo->supported_wps_cm = WPS_CONFIG_METHOD_DISPLAY | WPS_CONFIG_METHOD_PBC | WPS_CONFIG_METHOD_KEYPAD;
- pwdinfo->channel_list_attr_len = 0;
- memset(pwdinfo->channel_list_attr, 0x00, 100);
-
- memset(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, 0x00, 4);
- memset(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, '0', 3);
- memset(&pwdinfo->groupid_info, 0x00, sizeof(struct group_id_info));
- memset(pwdinfo->p2p_peer_interface_addr, 0x00, ETH_ALEN);
- memset(pwdinfo->p2p_peer_device_addr, 0x00, ETH_ALEN);
-
- pwdinfo->rx_invitereq_info.operation_ch[0] = 0;
- pwdinfo->rx_invitereq_info.operation_ch[1] = 0; /* Used to indicate the scan end in site survey function */
- pwdinfo->rx_invitereq_info.scan_op_ch_only = 0;
- pwdinfo->p2p_info.operation_ch[0] = 0;
- pwdinfo->p2p_info.operation_ch[1] = 0; /* Used to indicate the scan end in site survey function */
- pwdinfo->p2p_info.scan_op_ch_only = 0;
-}
-
-int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
-{
- int ret;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- if (role == P2P_ROLE_DEVICE || role == P2P_ROLE_CLIENT || role == P2P_ROLE_GO) {
- /* leave IPS/Autosuspend */
- ret = rtw_pwr_wakeup(padapter);
- if (ret)
- return ret;
-
- /* Added by Albert 2011/03/22 */
-		/* In P2P mode, the driver should not support 802.11b, */
-		/* so Tx packets shouldn't use CCK rates. */
- update_tx_basic_rate(padapter, (WIRELESS_11G | WIRELESS_11_24N));
-
- /* Enable P2P function */
- init_wifidirect_info(padapter, role);
-
- } else if (role == P2P_ROLE_DISABLE) {
- ret = rtw_pwr_wakeup(padapter);
- if (ret)
- return ret;
-
- /* Disable P2P function */
- if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
- _cancel_timer_ex(&pwdinfo->find_phase_timer);
- _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
- _cancel_timer_ex(&pwdinfo->pre_tx_scan_timer);
- _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey);
- _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey2);
- rtw_p2p_set_state(pwdinfo, P2P_STATE_NONE);
- rtw_p2p_set_role(pwdinfo, P2P_ROLE_DISABLE);
- memset(&pwdinfo->rx_prov_disc_info, 0x00, sizeof(struct rx_provdisc_req_info));
- }
-
- /* Restore to initial setting. */
- update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
- }
-
- return 0;
-}
diff --git a/drivers/staging/r8188eu/core/rtw_pwrctrl.c b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
deleted file mode 100644
index 051cdcb11ff5..000000000000
--- a/drivers/staging/r8188eu/core/rtw_pwrctrl.c
+++ /dev/null
@@ -1,445 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#define _RTW_PWRCTRL_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/osdep_intf.h"
-#include "../include/linux/usb.h"
-
-static void ips_enter(struct adapter *padapter)
-{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct xmit_priv *pxmit_priv = &padapter->xmitpriv;
-
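-	/* Enter IPS only when all xmit buffers are free, i.e. no transmission is pending. */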
- if (pxmit_priv->free_xmitbuf_cnt != NR_XMITBUFF ||
- pxmit_priv->free_xmit_extbuf_cnt != NR_XMIT_EXTBUFF)
- return;
-
- mutex_lock(&pwrpriv->lock);
-
- pwrpriv->bips_processing = true;
-
-	/* sync ips_mode with the requested mode */
- pwrpriv->ips_mode = pwrpriv->ips_mode_req;
-
- pwrpriv->ips_enter_cnts++;
- pwrpriv->bpower_saving = true;
-
- if (pwrpriv->ips_mode == IPS_LEVEL_2)
- pwrpriv->bkeepfwalive = true;
-
- rtw_ips_pwr_down(padapter);
- pwrpriv->rf_pwrstate = rf_off;
-
- pwrpriv->bips_processing = false;
-
- mutex_unlock(&pwrpriv->lock);
-}
-
-static int ips_leave(struct adapter *padapter)
-{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- int result = _SUCCESS;
- int keyid;
-
- mutex_lock(&pwrpriv->lock);
-
- if ((pwrpriv->rf_pwrstate == rf_off) && (!pwrpriv->bips_processing)) {
- pwrpriv->bips_processing = true;
- pwrpriv->ips_leave_cnts++;
-
- result = rtw_ips_pwr_up(padapter);
- if (result == _SUCCESS) {
- pwrpriv->rf_pwrstate = rf_on;
- }
-
- if ((psecuritypriv->dot11PrivacyAlgrthm == _WEP40_) || (psecuritypriv->dot11PrivacyAlgrthm == _WEP104_)) {
- set_channel_bwmode(padapter, padapter->mlmeextpriv.cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
- for (keyid = 0; keyid < 4; keyid++) {
- if (pmlmepriv->key_mask & BIT(keyid)) {
- if (keyid == psecuritypriv->dot11PrivacyKeyIndex)
- result = rtw_set_key(padapter, psecuritypriv, keyid, 1);
- else
- result = rtw_set_key(padapter, psecuritypriv, keyid, 0);
- }
- }
- }
-
- pwrpriv->bips_processing = false;
-
- pwrpriv->bkeepfwalive = false;
- pwrpriv->bpower_saving = false;
- }
-
- mutex_unlock(&pwrpriv->lock);
-
- return result;
-}
-
-static bool rtw_pwr_unassociated_idle(struct adapter *adapter)
-{
- struct adapter *buddy = adapter->pbuddy_adapter;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct wifidirect_info *pwdinfo = &adapter->wdinfo;
- bool ret = false;
-
- if (time_after_eq(adapter->pwrctrlpriv.ips_deny_time, jiffies))
- goto exit;
-
- if (check_fwstate(pmlmepriv, WIFI_ASOC_STATE | WIFI_SITE_MONITOR) ||
- check_fwstate(pmlmepriv, WIFI_UNDER_LINKING | WIFI_UNDER_WPS) ||
- check_fwstate(pmlmepriv, WIFI_UNDER_WPS) ||
- check_fwstate(pmlmepriv, WIFI_AP_STATE) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE | WIFI_ADHOC_STATE) ||
- !rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
- goto exit;
-
- /* consider buddy, if exist */
- if (buddy) {
- struct mlme_priv *b_pmlmepriv = &buddy->mlmepriv;
- struct wifidirect_info *b_pwdinfo = &buddy->wdinfo;
-
- if (check_fwstate(b_pmlmepriv, WIFI_ASOC_STATE | WIFI_SITE_MONITOR) ||
- check_fwstate(b_pmlmepriv, WIFI_UNDER_LINKING | WIFI_UNDER_WPS) ||
- check_fwstate(b_pmlmepriv, WIFI_AP_STATE) ||
- check_fwstate(b_pmlmepriv, WIFI_ADHOC_MASTER_STATE | WIFI_ADHOC_STATE) ||
- !rtw_p2p_chk_state(b_pwdinfo, P2P_STATE_NONE))
- goto exit;
- }
- ret = true;
-
-exit:
- return ret;
-}
-
-void rtw_ps_processor(struct adapter *padapter)
-{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-
- pwrpriv->ps_processing = true;
-
- if (pwrpriv->bips_processing)
- goto exit;
-
- if (pwrpriv->ips_mode_req == IPS_NONE)
- goto exit;
-
- if (!rtw_pwr_unassociated_idle(padapter))
- goto exit;
-
- if (pwrpriv->rf_pwrstate == rf_on)
- ips_enter(padapter);
-
-exit:
- rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
- pwrpriv->ps_processing = false;
-}
-
-static void pwr_state_check_handler(struct timer_list *t)
-{
- struct adapter *padapter =
- from_timer(padapter, t,
- pwrctrlpriv.pwr_state_check_timer);
- rtw_ps_cmd(padapter);
-}
-
-static bool PS_RDY_CHECK(struct adapter *padapter)
-{
- u32 curr_time, delta_time;
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- curr_time = jiffies;
- delta_time = curr_time - pwrpriv->DelayLPSLastTimeStamp;
-
- if (delta_time < LPS_DELAY_TIME)
- return false;
-
- if (!check_fwstate(pmlmepriv, _FW_LINKED) ||
- check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) ||
- check_fwstate(pmlmepriv, WIFI_AP_STATE) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_STATE))
- return false;
- if (pwrpriv->bInSuspend)
- return false;
- if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X &&
- !padapter->securitypriv.binstallGrpkey)
- return false;
- return true;
-}
-
-void rtw_set_firmware_ps_mode(struct adapter *adapter, u8 mode)
-{
- struct hal_data_8188e *haldata = &adapter->haldata;
- struct odm_dm_struct *odmpriv = &haldata->odmpriv;
-
- /* Force leave RF low power mode for 1T1R to prevent
- * conflicting setting in firmware power saving sequence.
- */
- if (mode != PS_MODE_ACTIVE)
- ODM_RF_Saving(odmpriv, true);
- rtl8188e_set_FwPwrMode_cmd(adapter, mode);
-}
-
-void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_ant_mode)
-{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- if (ps_mode > PM_Card_Disable)
- return;
-
- if (pwrpriv->pwr_mode == ps_mode) {
- if (ps_mode == PS_MODE_ACTIVE)
- return;
-
- if ((pwrpriv->smart_ps == smart_ps) &&
- (pwrpriv->bcn_ant_mode == bcn_ant_mode))
- return;
- }
-
- if (ps_mode == PS_MODE_ACTIVE) {
- if (pwdinfo->opp_ps == 0) {
- pwrpriv->pwr_mode = ps_mode;
- rtw_set_firmware_ps_mode(padapter, ps_mode);
- pwrpriv->bFwCurrentInPSMode = false;
- }
- } else {
- if (PS_RDY_CHECK(padapter)) {
- pwrpriv->bFwCurrentInPSMode = true;
- pwrpriv->pwr_mode = ps_mode;
- pwrpriv->smart_ps = smart_ps;
- pwrpriv->bcn_ant_mode = bcn_ant_mode;
- rtw_set_firmware_ps_mode(padapter, ps_mode);
-
- /* Set CTWindow after LPS */
- if (pwdinfo->opp_ps == 1)
- p2p_ps_wk_cmd(padapter, P2P_PS_ENABLE, 0);
- }
- }
-}
-
-static bool lps_rf_on(struct adapter *adapter)
-{
- int res;
- u32 reg;
-
-	/* When we halt the NIC, we should check whether the FW has left LPS. */
- if (adapter->pwrctrlpriv.rf_pwrstate == rf_off) {
-		/* If we are in HW/SW radio-off or IPS state, do not check for FW LPS leave, */
-		/* because the FW is unloaded. */
- return true;
- }
-
- res = rtw_read32(adapter, REG_RCR, &reg);
- if (res)
- return false;
-
- if (reg & 0x00070000)
- return false;
-
- return true;
-}
-
-/*
- * Return:
- * 0: Leave OK
- * -1: Timeout
- * -2: Other error
- */
-static s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(delay_ms);
- s32 err = 0;
-
- while (1) {
- if (lps_rf_on(padapter))
- break;
-
- if (padapter->bSurpriseRemoved) {
- err = -2;
- break;
- }
-
- if (time_after(jiffies, timeout)) {
- err = -1;
- break;
- }
- mdelay(1);
- }
-
- return err;
-}
-
-/* */
-/* Description: */
-/* Enter the leisure power save mode. */
-/* */
-void LPS_Enter(struct adapter *padapter)
-{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-
- if (!PS_RDY_CHECK(padapter))
- return;
-
- if (pwrpriv->bLeisurePs) {
-		/* Stay active for a while after associating with the AP before entering LPS. */
- if (pwrpriv->LpsIdleCount >= 2) { /* 4 Sec */
- if (pwrpriv->pwr_mode == PS_MODE_ACTIVE) {
- pwrpriv->bpower_saving = true;
- /* For Tenda W311R IOT issue */
- rtw_set_ps_mode(padapter, pwrpriv->power_mgnt,
- pwrpriv->smart_ps, 0x40);
- }
- } else {
- pwrpriv->LpsIdleCount++;
- }
- }
-
-}
-
-#define LPS_LEAVE_TIMEOUT_MS 100
-
-/* Description: */
-/* Leave the leisure power save mode. */
-void LPS_Leave(struct adapter *padapter)
-{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-
- if (pwrpriv->bLeisurePs) {
- if (pwrpriv->pwr_mode != PS_MODE_ACTIVE) {
- rtw_set_ps_mode(padapter, PS_MODE_ACTIVE, 0, 0x40);
-
- if (pwrpriv->pwr_mode == PS_MODE_ACTIVE)
- LPS_RF_ON_check(padapter, LPS_LEAVE_TIMEOUT_MS);
- }
- }
-
- pwrpriv->bpower_saving = false;
-
-}
-
-/* */
-/* Description: Leave all power save modes (LPS, FwLPS, IPS) if needed. */
-/* Move code to function by tynli. 2010.03.26. */
-/* */
-void LeaveAllPowerSaveMode(struct adapter *Adapter)
-{
- struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
- u8 enqueue = 0;
-
- if (check_fwstate(pmlmepriv, _FW_LINKED)) { /* connect */
- p2p_ps_wk_cmd(Adapter, P2P_PS_DISABLE, enqueue);
-
- rtw_lps_ctrl_wk_cmd(Adapter, LPS_CTRL_LEAVE, enqueue);
- }
-
-}
-
-void rtw_init_pwrctrl_priv(struct adapter *padapter)
-{
- struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
-
- mutex_init(&pwrctrlpriv->lock);
- pwrctrlpriv->rf_pwrstate = rf_on;
- pwrctrlpriv->ips_enter_cnts = 0;
- pwrctrlpriv->ips_leave_cnts = 0;
- pwrctrlpriv->bips_processing = false;
-
- pwrctrlpriv->ips_mode = padapter->registrypriv.ips_mode;
- pwrctrlpriv->ips_mode_req = padapter->registrypriv.ips_mode;
-
- pwrctrlpriv->pwr_state_check_interval = RTW_PWR_STATE_CHK_INTERVAL;
- pwrctrlpriv->bInSuspend = false;
- pwrctrlpriv->bkeepfwalive = false;
-
- pwrctrlpriv->LpsIdleCount = 0;
- pwrctrlpriv->power_mgnt = padapter->registrypriv.power_mgnt;/* PS_MODE_MIN; */
- pwrctrlpriv->bLeisurePs = pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE;
-
- pwrctrlpriv->bFwCurrentInPSMode = false;
-
- pwrctrlpriv->pwr_mode = PS_MODE_ACTIVE;
- pwrctrlpriv->smart_ps = padapter->registrypriv.smart_ps;
- pwrctrlpriv->bcn_ant_mode = 0;
-
- timer_setup(&pwrctrlpriv->pwr_state_check_timer, pwr_state_check_handler, 0);
-}
-
-/* Wake the NIC up from: 1)IPS 2)USB autosuspend */
-int rtw_pwr_wakeup(struct adapter *padapter)
-{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- unsigned long timeout = jiffies + msecs_to_jiffies(3000);
- unsigned long deny_time;
- int ret;
-
- while (pwrpriv->ps_processing && time_before(jiffies, timeout))
- msleep(10);
-
-	/* I think this should be checked in the IPS, LPS and autosuspend functions... */
- /* Below goto is a success path taken for already linked devices */
- ret = 0;
- if (check_fwstate(pmlmepriv, _FW_LINKED))
- goto exit;
-
- if (pwrpriv->rf_pwrstate == rf_off && ips_leave(padapter) == _FAIL) {
- ret = -ENOMEM;
- goto exit;
- }
-
- if (padapter->bDriverStopped || !padapter->bup || !padapter->hw_init_completed) {
- ret = -EBUSY;
- goto exit;
- }
-
-exit:
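-	/* Postpone the next IPS entry for at least one power-state check interval. */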
- deny_time = jiffies + msecs_to_jiffies(RTW_PWR_STATE_CHK_INTERVAL);
- if (time_before(pwrpriv->ips_deny_time, deny_time))
- pwrpriv->ips_deny_time = deny_time;
- return ret;
-}
-
-int rtw_pm_set_lps(struct adapter *padapter, u8 mode)
-{
- int ret = 0;
- struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
-
- if (mode < PS_MODE_NUM) {
- if (pwrctrlpriv->power_mgnt != mode) {
- if (mode == PS_MODE_ACTIVE)
- LeaveAllPowerSaveMode(padapter);
- else
- pwrctrlpriv->LpsIdleCount = 2;
- pwrctrlpriv->power_mgnt = mode;
- pwrctrlpriv->bLeisurePs = pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE;
- }
- } else {
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-int rtw_pm_set_ips(struct adapter *padapter, u8 mode)
-{
- struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
-
- if (mode == IPS_NORMAL || mode == IPS_LEVEL_2) {
- rtw_ips_mode_req(pwrctrlpriv, mode);
- return 0;
- } else if (mode == IPS_NONE) {
- rtw_ips_mode_req(pwrctrlpriv, mode);
- if ((padapter->bSurpriseRemoved == 0) && rtw_pwr_wakeup(padapter))
- return -EFAULT;
- } else {
- return -EINVAL;
- }
- return 0;
-}
diff --git a/drivers/staging/r8188eu/core/rtw_recv.c b/drivers/staging/r8188eu/core/rtw_recv.c
deleted file mode 100644
index fc7568cf948b..000000000000
--- a/drivers/staging/r8188eu/core/rtw_recv.c
+++ /dev/null
@@ -1,2010 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#define _RTW_RECV_C_
-
-#include <linux/ieee80211.h>
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/usb_ops.h"
-#include "../include/wifi.h"
-#include "../include/rtl8188e_recv.h"
-
-static u8 SNAP_ETH_TYPE_IPX[2] = {0x81, 0x37};
-static u8 SNAP_ETH_TYPE_APPLETALK_AARP[2] = {0x80, 0xf3};
-
-/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
-static u8 rtw_bridge_tunnel_header[] = {
- 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8
-};
-
-static u8 rtw_rfc1042_header[] = {
- 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
-};
-
-static void rtw_signal_stat_timer_hdl(struct timer_list *t);
-
-void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
-{
-
- memset((u8 *)psta_recvpriv, 0, sizeof(struct sta_recv_priv));
-
- spin_lock_init(&psta_recvpriv->lock);
-
- rtw_init_queue(&psta_recvpriv->defrag_q);
-
-}
-
-static int rtl8188eu_init_recv_priv(struct adapter *padapter)
-{
- struct recv_priv *precvpriv = &padapter->recvpriv;
- int i, err = 0;
- struct recv_buf *precvbuf;
-
- tasklet_init(&precvpriv->recv_tasklet,
- rtl8188eu_recv_tasklet,
- (unsigned long)padapter);
-
- /* init recv_buf */
- rtw_init_queue(&precvpriv->free_recv_buf_queue);
-
- precvpriv->pallocated_recv_buf = kzalloc(NR_RECVBUFF * sizeof(struct recv_buf) + 4,
- GFP_KERNEL);
- if (!precvpriv->pallocated_recv_buf)
- return -ENOMEM;
-
- precvpriv->precv_buf = (u8 *)ALIGN((size_t)(precvpriv->pallocated_recv_buf), 4);
-
- precvbuf = (struct recv_buf *)precvpriv->precv_buf;
-
- for (i = 0; i < NR_RECVBUFF; i++) {
- precvbuf->pskb = NULL;
- precvbuf->reuse = false;
- precvbuf->purb = usb_alloc_urb(0, GFP_KERNEL);
- if (!precvbuf->purb) {
- err = -ENOMEM;
- break;
- }
- precvbuf->adapter = padapter;
- precvbuf++;
- }
- precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;
- skb_queue_head_init(&precvpriv->rx_skb_queue);
- {
- int i;
- size_t tmpaddr = 0;
- size_t alignment = 0;
- struct sk_buff *pskb = NULL;
-
- skb_queue_head_init(&precvpriv->free_recv_skb_queue);
-
- for (i = 0; i < NR_PREALLOC_RECV_SKB; i++) {
- pskb = __netdev_alloc_skb(padapter->pnetdev,
- MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ, GFP_KERNEL);
- if (pskb) {
- pskb->dev = padapter->pnetdev;
- tmpaddr = (size_t)pskb->data;
- alignment = tmpaddr & (RECVBUFF_ALIGN_SZ - 1);
- skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignment));
-
- skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
- }
- pskb = NULL;
- }
- }
-
- return err;
-}
-
-int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
-{
- int i;
- struct recv_frame *precvframe;
- int err;
-
- spin_lock_init(&precvpriv->lock);
-
- rtw_init_queue(&precvpriv->free_recv_queue);
- rtw_init_queue(&precvpriv->recv_pending_queue);
- rtw_init_queue(&precvpriv->uc_swdec_pending_queue);
-
- precvpriv->adapter = padapter;
-
- precvpriv->free_recvframe_cnt = NR_RECVFRAME;
-
- precvpriv->pallocated_frame_buf = vzalloc(NR_RECVFRAME * sizeof(struct recv_frame) + RXFRAME_ALIGN_SZ);
- if (!precvpriv->pallocated_frame_buf)
- return -ENOMEM;
-
- precvpriv->precv_frame_buf = (u8 *)ALIGN((size_t)(precvpriv->pallocated_frame_buf), RXFRAME_ALIGN_SZ);
-
- precvframe = (struct recv_frame *)precvpriv->precv_frame_buf;
-
- for (i = 0; i < NR_RECVFRAME; i++) {
- INIT_LIST_HEAD(&precvframe->list);
-
- list_add_tail(&precvframe->list, &precvpriv->free_recv_queue.queue);
-
- precvframe->pkt = NULL;
-
- precvframe->len = 0;
-
- precvframe->adapter = padapter;
- precvframe++;
- }
- precvpriv->rx_pending_cnt = 1;
-
- err = rtl8188eu_init_recv_priv(padapter);
-
- timer_setup(&precvpriv->signal_stat_timer, rtw_signal_stat_timer_hdl, 0);
- precvpriv->signal_stat_sampling_interval = 1000; /* ms */
-
- rtw_set_signal_stat_timer(precvpriv);
-
- return err;
-}
-
-static void rtl8188eu_free_recv_priv(struct adapter *padapter)
-{
- int i;
- struct recv_buf *precvbuf;
- struct recv_priv *precvpriv = &padapter->recvpriv;
-
- precvbuf = (struct recv_buf *)precvpriv->precv_buf;
-
- for (i = 0; i < NR_RECVBUFF; i++) {
- usb_free_urb(precvbuf->purb);
- precvbuf++;
- }
-
- kfree(precvpriv->pallocated_recv_buf);
-
- skb_queue_purge(&precvpriv->rx_skb_queue);
-
- skb_queue_purge(&precvpriv->free_recv_skb_queue);
-}
-
-void _rtw_free_recv_priv(struct recv_priv *precvpriv)
-{
- struct adapter *padapter = precvpriv->adapter;
-
- rtw_free_uc_swdec_pending_queue(padapter);
-
- vfree(precvpriv->pallocated_frame_buf);
-
- rtl8188eu_free_recv_priv(padapter);
- _cancel_timer_ex(&precvpriv->signal_stat_timer);
-}
-
-struct recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
-{
- struct recv_frame *hdr;
- struct list_head *plist, *phead;
- struct adapter *padapter;
- struct recv_priv *precvpriv;
-
- if (list_empty(&pfree_recv_queue->queue)) {
- hdr = NULL;
- } else {
- phead = get_list_head(pfree_recv_queue);
-
- plist = phead->next;
-
- hdr = container_of(plist, struct recv_frame, list);
-
- list_del_init(&hdr->list);
- padapter = hdr->adapter;
- if (padapter) {
- precvpriv = &padapter->recvpriv;
- if (pfree_recv_queue == &precvpriv->free_recv_queue)
- precvpriv->free_recvframe_cnt--;
- }
- }
-
- return (struct recv_frame *)hdr;
-}
-
-struct recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
-{
- struct recv_frame *precvframe;
-
- spin_lock_bh(&pfree_recv_queue->lock);
-
- precvframe = _rtw_alloc_recvframe(pfree_recv_queue);
-
- spin_unlock_bh(&pfree_recv_queue->lock);
-
- return precvframe;
-}
-
-int rtw_free_recvframe(struct recv_frame *precvframe, struct __queue *pfree_recv_queue)
-{
- struct adapter *padapter;
- struct recv_priv *precvpriv;
-
- if (!precvframe)
- return _FAIL;
- padapter = precvframe->adapter;
- precvpriv = &padapter->recvpriv;
- if (precvframe->pkt) {
- dev_kfree_skb_any(precvframe->pkt);/* free skb by driver */
- precvframe->pkt = NULL;
- }
-
- spin_lock_bh(&pfree_recv_queue->lock);
-
- list_del_init(&precvframe->list);
-
- precvframe->len = 0;
-
- list_add_tail(&precvframe->list, get_list_head(pfree_recv_queue));
-
- if (padapter && (pfree_recv_queue == &precvpriv->free_recv_queue))
- precvpriv->free_recvframe_cnt++;
-
- spin_unlock_bh(&pfree_recv_queue->lock);
-
- return _SUCCESS;
-}
-
-int _rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue)
-{
- struct adapter *padapter = precvframe->adapter;
- struct recv_priv *precvpriv = &padapter->recvpriv;
-
- list_del_init(&precvframe->list);
- list_add_tail(&precvframe->list, get_list_head(queue));
-
- if (padapter) {
- if (queue == &precvpriv->free_recv_queue)
- precvpriv->free_recvframe_cnt++;
- }
-
- return _SUCCESS;
-}
-
-int rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue)
-{
- int ret;
-
- spin_lock_bh(&queue->lock);
- ret = _rtw_enqueue_recvframe(precvframe, queue);
- spin_unlock_bh(&queue->lock);
-
- return ret;
-}
-
-/*
- * caller : defrag ; recvframe_chk_defrag in recv_thread (passive)
- * pframequeue: defrag_queue : will be accessed in recv_thread (passive)
- *
- * using spinlock to protect
- *
- */
-
-void rtw_free_recvframe_queue(struct __queue *pframequeue, struct __queue *pfree_recv_queue)
-{
- struct recv_frame *hdr;
- struct list_head *plist, *phead;
-
- spin_lock(&pframequeue->lock);
-
- phead = get_list_head(pframequeue);
- plist = phead->next;
-
- while (phead != plist) {
- hdr = container_of(plist, struct recv_frame, list);
-
- plist = plist->next;
-
- rtw_free_recvframe((struct recv_frame *)hdr, pfree_recv_queue);
- }
-
- spin_unlock(&pframequeue->lock);
-
-}
-
-u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
-{
- u32 cnt = 0;
- struct recv_frame *pending_frame;
-
- while ((pending_frame = rtw_alloc_recvframe(&adapter->recvpriv.uc_swdec_pending_queue))) {
- rtw_free_recvframe(pending_frame, &adapter->recvpriv.free_recv_queue);
- cnt++;
- }
-
- return cnt;
-}
-
-static void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup)
-{
- union iwreq_data wrqu;
- struct iw_michaelmicfailure ev;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- u32 cur_time = 0;
-
- if (psecuritypriv->last_mic_err_time == 0) {
- psecuritypriv->last_mic_err_time = jiffies;
- } else {
- cur_time = jiffies;
-
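-		/* A second MIC failure within 60 seconds triggers TKIP countermeasures. */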
- if (cur_time - psecuritypriv->last_mic_err_time < 60 * HZ) {
- psecuritypriv->btkip_countermeasure = true;
- psecuritypriv->last_mic_err_time = 0;
- psecuritypriv->btkip_countermeasure_time = cur_time;
- } else {
- psecuritypriv->last_mic_err_time = jiffies;
- }
- }
-
- memset(&ev, 0x00, sizeof(ev));
- if (bgroup)
- ev.flags |= IW_MICFAILURE_GROUP;
- else
- ev.flags |= IW_MICFAILURE_PAIRWISE;
-
- ev.src_addr.sa_family = ARPHRD_ETHER;
- memcpy(ev.src_addr.sa_data, &pmlmepriv->assoc_bssid[0], ETH_ALEN);
- memset(&wrqu, 0x00, sizeof(wrqu));
- wrqu.data.length = sizeof(ev);
- wireless_send_event(padapter->pnetdev, IWEVMICHAELMICFAILURE,
- &wrqu, (char *)&ev);
-}
-
-static int recvframe_chkmic(struct adapter *adapter, struct recv_frame *precvframe)
-{
- int i, res = _SUCCESS;
- u32 datalen;
- u8 miccode[8];
- u8 bmic_err = false, brpt_micerror = true;
- u8 *pframe, *payload, *pframemic;
- u8 *mickey;
- struct sta_info *stainfo;
- struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
- struct security_priv *psecuritypriv = &adapter->securitypriv;
-
- struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- stainfo = rtw_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);
-
- if (prxattrib->encrypt == _TKIP_) {
- /* calculate mic code */
- if (stainfo) {
- if (is_multicast_ether_addr(prxattrib->ra)) {
- mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
-
- if (!psecuritypriv) {
- res = _FAIL;
- goto exit;
- }
- } else {
- mickey = &stainfo->dot11tkiprxmickey.skey[0];
- }
-
- datalen = precvframe->len - prxattrib->hdrlen - prxattrib->iv_len - prxattrib->icv_len - 8;/* icv_len included the mic code */
- pframe = precvframe->rx_data;
- payload = pframe + prxattrib->hdrlen + prxattrib->iv_len;
-
- rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0],
- (unsigned char)prxattrib->priority); /* care the length of the data */
-
- pframemic = payload + datalen;
-
- bmic_err = false;
-
- for (i = 0; i < 8; i++) {
- if (miccode[i] != *(pframemic + i))
- bmic_err = true;
- }
-
- if (bmic_err) {
-				/* double-check key_index because of a timing issue; */
-				/* comparing with psecuritypriv->dot118021XGrpKeyid would also hit the timing issue */
- if (is_multicast_ether_addr(prxattrib->ra) && prxattrib->key_index != pmlmeinfo->key_index)
- brpt_micerror = false;
-
- if ((prxattrib->bdecrypted) && (brpt_micerror))
- rtw_handle_tkip_mic_err(adapter, (u8)is_multicast_ether_addr(prxattrib->ra));
-
- res = _FAIL;
- } else {
- /* mic checked ok */
- if (!psecuritypriv->bcheck_grpkey && is_multicast_ether_addr(prxattrib->ra))
- psecuritypriv->bcheck_grpkey = true;
- }
- }
-
- recvframe_pull_tail(precvframe, 8);
- }
-
-exit:
-
- return res;
-}
-
-/* decrypt and set the ivlen, icvlen of the recv_frame */
-static struct recv_frame *decryptor(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- struct rx_pkt_attrib *prxattrib = &precv_frame->attrib;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct recv_frame *return_packet = precv_frame;
- u32 res = _SUCCESS;
-
- if (prxattrib->encrypt > 0) {
- u8 *iv = precv_frame->rx_data + prxattrib->hdrlen;
-
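-		/* The Key ID occupies bits 6-7 of the fourth IV octet. */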
- prxattrib->key_index = (((iv[3]) >> 6) & 0x3);
-
- if (prxattrib->key_index > WEP_KEYS) {
- switch (prxattrib->encrypt) {
- case _WEP40_:
- case _WEP104_:
- prxattrib->key_index = psecuritypriv->dot11PrivacyKeyIndex;
- break;
- case _TKIP_:
- case _AES_:
- default:
- prxattrib->key_index = psecuritypriv->dot118021XGrpKeyid;
- break;
- }
- }
- }
-
- if ((prxattrib->encrypt > 0) && ((prxattrib->bdecrypted == 0) || (psecuritypriv->sw_decrypt))) {
- psecuritypriv->hw_decrypted = false;
-
- switch (prxattrib->encrypt) {
- case _WEP40_:
- case _WEP104_:
- rtw_wep_decrypt(padapter, precv_frame);
- break;
- case _TKIP_:
- res = rtw_tkip_decrypt(padapter, precv_frame);
- break;
- case _AES_:
- res = rtw_aes_decrypt(padapter, precv_frame);
- break;
- default:
- break;
- }
- } else if (prxattrib->bdecrypted == 1 && prxattrib->encrypt > 0 &&
- (psecuritypriv->busetkipkey == 1 || prxattrib->encrypt != _TKIP_))
- psecuritypriv->hw_decrypted = true;
-
- if (res == _FAIL) {
- rtw_free_recvframe(return_packet, &padapter->recvpriv.free_recv_queue);
- return_packet = NULL;
- } else {
- prxattrib->bdecrypted = true;
- }
-
- return return_packet;
-}
-
-/* set the security information in the recv_frame */
-static struct recv_frame *portctrl(struct adapter *adapter, struct recv_frame *precv_frame)
-{
- u8 *psta_addr, *ptr;
- uint auth_alg;
- struct recv_frame *pfhdr;
- struct sta_info *psta;
- struct sta_priv *pstapriv;
- struct recv_frame *prtnframe;
- u16 ether_type = 0;
- u16 eapol_type = 0x888e;/* for Funia BD's WPA issue */
- struct rx_pkt_attrib *pattrib;
- __be16 be_tmp;
-
- pstapriv = &adapter->stapriv;
-
- auth_alg = adapter->securitypriv.dot11AuthAlgrthm;
-
- ptr = precv_frame->rx_data;
- pfhdr = precv_frame;
- pattrib = &pfhdr->attrib;
- psta_addr = pattrib->ta;
-
- prtnframe = NULL;
-
- psta = rtw_get_stainfo(pstapriv, psta_addr);
-
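-	/* When 802.1X is in use and the STA's port is still blocked, only EAPOL frames are passed up; other frames are freed. */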
- if (auth_alg == 2) {
- if (psta && psta->ieee8021x_blocked) {
- /* blocked */
- /* only accept EAPOL frame */
- prtnframe = precv_frame;
-
- /* get ether_type */
- ptr = ptr + pfhdr->attrib.hdrlen + pfhdr->attrib.iv_len + LLC_HEADER_SIZE;
- memcpy(&be_tmp, ptr, 2);
- ether_type = ntohs(be_tmp);
-
- if (ether_type == eapol_type) {
- prtnframe = precv_frame;
- } else {
- /* free this frame */
- rtw_free_recvframe(precv_frame, &adapter->recvpriv.free_recv_queue);
- prtnframe = NULL;
- }
- } else {
- /* allowed */
- /* check decryption status, and decrypt the frame if needed */
- prtnframe = precv_frame;
- }
- } else {
- prtnframe = precv_frame;
- }
-
- return prtnframe;
-}
-
-static int recv_decache(struct recv_frame *precv_frame, u8 bretry, struct stainfo_rxcache *prxcache)
-{
- int tid = precv_frame->attrib.priority;
-
- u16 seq_ctrl = ((precv_frame->attrib.seq_num & 0xffff) << 4) |
- (precv_frame->attrib.frag_num & 0xf);
-
- if (tid > 15)
- return _FAIL;
-
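-	/* Drop duplicates: a frame whose seq/frag matches the cached value for this TID was already received. */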
- if (1) {/* if (bretry) */
- if (seq_ctrl == prxcache->tid_rxseq[tid])
- return _FAIL;
- }
-
- prxcache->tid_rxseq[tid] = seq_ctrl;
-
- return _SUCCESS;
-}
-
-static void process_pwrbit_data(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- unsigned char pwrbit;
- u8 *ptr = precv_frame->rx_data;
- struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct sta_info *psta = NULL;
-
- psta = rtw_get_stainfo(pstapriv, pattrib->src);
-
- pwrbit = GetPwrMgt(ptr);
-
- if (psta) {
- if (pwrbit) {
- if (!(psta->state & WIFI_SLEEP_STATE))
- stop_sta_xmit(padapter, psta);
- } else {
- if (psta->state & WIFI_SLEEP_STATE)
- wakeup_sta_to_xmit(padapter, psta);
- }
- }
-}
-
-static void process_wmmps_data(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct sta_info *psta = NULL;
-
- psta = rtw_get_stainfo(pstapriv, pattrib->src);
-
- if (!psta)
- return;
-
- if (!psta->qos_option)
- return;
-
- if (!(psta->qos_info & 0xf))
- return;
-
- if (psta->state & WIFI_SLEEP_STATE) {
- u8 wmmps_ac = 0;
-
- switch (pattrib->priority) {
- case 1:
- case 2:
- wmmps_ac = psta->uapsd_bk & BIT(1);
- break;
- case 4:
- case 5:
- wmmps_ac = psta->uapsd_vi & BIT(1);
- break;
- case 6:
- case 7:
- wmmps_ac = psta->uapsd_vo & BIT(1);
- break;
- case 0:
- case 3:
- default:
- wmmps_ac = psta->uapsd_be & BIT(1);
- break;
- }
-
- if (wmmps_ac) {
- if (psta->sleepq_ac_len > 0) {
- /* process received triggered frame */
- xmit_delivery_enabled_frames(padapter, psta);
- } else {
- /* issue one qos null frame with More data bit = 0 and the EOSP bit set (= 1) */
- issue_qos_nulldata(padapter, psta->hwaddr, (u16)pattrib->priority, 0, 0);
- }
- }
- }
-}
-
-static void count_rx_stats(struct adapter *padapter, struct recv_frame *prframe, struct sta_info *sta)
-{
- int sz;
- struct sta_info *psta = NULL;
- struct stainfo_stats *pstats = NULL;
- struct rx_pkt_attrib *pattrib = &prframe->attrib;
- struct recv_priv *precvpriv = &padapter->recvpriv;
-
- sz = get_recvframe_len(prframe);
- precvpriv->rx_bytes += sz;
-
- padapter->mlmepriv.LinkDetectInfo.NumRxOkInPeriod++;
-
- if (!is_broadcast_ether_addr(pattrib->dst) && !is_multicast_ether_addr(pattrib->dst))
- padapter->mlmepriv.LinkDetectInfo.NumRxUnicastOkInPeriod++;
-
- if (sta)
- psta = sta;
- else
- psta = prframe->psta;
-
- if (psta) {
- pstats = &psta->sta_stats;
-
- pstats->rx_data_pkts++;
- pstats->rx_bytes += sz;
- }
-}
-
-static int sta2sta_data_frame(struct adapter *adapter,
- struct recv_frame *precv_frame, struct sta_info **psta)
-{
- int ret = _SUCCESS;
- struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
- struct sta_priv *pstapriv = &adapter->stapriv;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- u8 *mybssid = get_bssid(pmlmepriv);
- u8 *myhwaddr = myid(&adapter->eeprompriv);
- u8 *sta_addr = NULL;
- bool bmcast = is_multicast_ether_addr(pattrib->dst);
-
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
- /* filter packets that SA is myself or multicast or broadcast */
- if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN)) {
- ret = _FAIL;
- goto exit;
- }
-
- if ((memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast)) {
- ret = _FAIL;
- goto exit;
- }
-
- if (!memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
- !memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
- memcmp(pattrib->bssid, mybssid, ETH_ALEN)) {
- ret = _FAIL;
- goto exit;
- }
-
- sta_addr = pattrib->src;
- } else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
-		/* For station mode, SA and BSSID should always be the BSSID, and DA is my MAC address */
- if (memcmp(pattrib->bssid, pattrib->src, ETH_ALEN)) {
- ret = _FAIL;
- goto exit;
- }
- sta_addr = pattrib->bssid;
- } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- if (bmcast) {
-			/* For AP mode, if DA == MCAST, then the BSSID should also be MCAST */
- if (!is_multicast_ether_addr(pattrib->bssid)) {
- ret = _FAIL;
- goto exit;
- }
- } else { /* not mc-frame */
-			/* For AP mode, if DA is not MCAST, then DA must equal the BSSID */
- if (memcmp(pattrib->bssid, pattrib->dst, ETH_ALEN)) {
- ret = _FAIL;
- goto exit;
- }
-
- sta_addr = pattrib->src;
- }
- } else if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
- sta_addr = mybssid;
- } else {
- ret = _FAIL;
- }
-
- if (bmcast)
- *psta = rtw_get_bcmc_stainfo(adapter);
- else
- *psta = rtw_get_stainfo(pstapriv, sta_addr); /* get ap_info */
-
- if (!*psta)
- goto exit;
-
-exit:
-
- return ret;
-}
-
-static int ap2sta_data_frame(
- struct adapter *adapter,
- struct recv_frame *precv_frame,
- struct sta_info **psta)
-{
- u8 *ptr = precv_frame->rx_data;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
- struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
- int ret = _SUCCESS;
- struct sta_priv *pstapriv = &adapter->stapriv;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- u8 *mybssid = get_bssid(pmlmepriv);
- u8 *myhwaddr = myid(&adapter->eeprompriv);
- bool bmcast = is_multicast_ether_addr(pattrib->dst);
-
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) &&
- (check_fwstate(pmlmepriv, _FW_LINKED) ||
- check_fwstate(pmlmepriv, _FW_UNDER_LINKING))) {
- /* filter packets that SA is myself or multicast or broadcast */
- if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN)) {
- ret = _FAIL;
- goto exit;
- }
-
- /* da should be for me */
- if ((memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast)) {
- ret = _FAIL;
- goto exit;
- }
-
- /* check BSSID */
- if (is_zero_ether_addr(pattrib->bssid) || is_zero_ether_addr(mybssid) ||
- (memcmp(pattrib->bssid, mybssid, ETH_ALEN))) {
- if (!bmcast)
- issue_deauth(adapter, pattrib->bssid, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
-
- ret = _FAIL;
- goto exit;
- }
-
- if (bmcast)
- *psta = rtw_get_bcmc_stainfo(adapter);
- else
- *psta = rtw_get_stainfo(pstapriv, pattrib->bssid); /* get ap_info */
-
- if (!*psta) {
- ret = _FAIL;
- goto exit;
- }
-
- if (ieee80211_is_nullfunc(hdr->frame_control)) {
- /* We count the nullfunc frame, but we'll not pass it on to higher layers. */
- count_rx_stats(adapter, precv_frame, *psta);
- ret = RTW_RX_HANDLED;
- goto exit;
- }
- } else if (check_fwstate(pmlmepriv, WIFI_MP_STATE) &&
- check_fwstate(pmlmepriv, _FW_LINKED)) {
- memcpy(pattrib->src, GetAddr2Ptr(ptr), ETH_ALEN);
-
- memcpy(pattrib->bssid, mybssid, ETH_ALEN);
-
- *psta = rtw_get_stainfo(pstapriv, pattrib->bssid); /* get sta_info */
- if (!*psta) {
- ret = _FAIL;
- goto exit;
- }
- } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- /* Special case */
- ret = RTW_RX_HANDLED;
- goto exit;
- } else {
- if (!memcmp(myhwaddr, pattrib->dst, ETH_ALEN) && (!bmcast)) {
- *psta = rtw_get_stainfo(pstapriv, pattrib->bssid); /* get sta_info */
- if (!*psta)
- issue_deauth(adapter, pattrib->bssid, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
- }
-
- ret = _FAIL;
- }
-
-exit:
-
- return ret;
-}
-
-static int sta2ap_data_frame(struct adapter *adapter,
- struct recv_frame *precv_frame,
- struct sta_info **psta)
-{
- struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
- struct sta_priv *pstapriv = &adapter->stapriv;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- u8 *ptr = precv_frame->rx_data;
- __le16 fc = *(__le16 *)ptr;
- unsigned char *mybssid = get_bssid(pmlmepriv);
- int ret = _SUCCESS;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- /* For AP mode, RA = BSSID, TX = STA(SRC_ADDR), A3 = DST_ADDR */
- if (memcmp(pattrib->bssid, mybssid, ETH_ALEN)) {
- ret = _FAIL;
- goto exit;
- }
-
- *psta = rtw_get_stainfo(pstapriv, pattrib->src);
- if (!*psta) {
- issue_deauth(adapter, pattrib->src, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
-
- ret = RTW_RX_HANDLED;
- goto exit;
- }
-
- process_pwrbit_data(adapter, precv_frame);
-
- if (ieee80211_is_data_qos(fc))
- process_wmmps_data(adapter, precv_frame);
-
- if (GetFrameSubType(ptr) & BIT(6)) {
-			/* No data; will not be indicated to the upper layer, temporarily count it here */
- count_rx_stats(adapter, precv_frame, *psta);
- ret = RTW_RX_HANDLED;
- goto exit;
- }
- } else {
- u8 *myhwaddr = myid(&adapter->eeprompriv);
-
- if (memcmp(pattrib->ra, myhwaddr, ETH_ALEN)) {
- ret = RTW_RX_HANDLED;
- goto exit;
- }
- issue_deauth(adapter, pattrib->src, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
- ret = RTW_RX_HANDLED;
- goto exit;
- }
-
-exit:
-
- return ret;
-}
-
-static void validate_recv_ctrl_frame(struct adapter *padapter,
- struct recv_frame *precv_frame)
-{
- struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
- struct ieee80211_pspoll *pspoll = (struct ieee80211_pspoll *)hdr;
- u8 wmmps_ac;
- struct sta_info *psta;
-
-	/* only accept frames whose RA (addr1) is my address */
- if (memcmp(hdr->addr1, myid(&padapter->eeprompriv), ETH_ALEN))
- return;
-
- /* only handle ps-poll */
- if (!ieee80211_is_pspoll(hdr->frame_control))
- return;
-
- psta = rtw_get_stainfo(pstapriv, hdr->addr2);
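-	/* The AID is carried in the low 14 bits of the PS-Poll ID/Duration field. */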
- if (!psta || psta->aid != (le16_to_cpu(pspoll->aid) & 0x3FFF))
- return;
-
- /* for rx pkt statistics */
- psta->sta_stats.rx_ctrl_pkts++;
-
- switch (pattrib->priority) {
- case 1:
- case 2:
- wmmps_ac = psta->uapsd_bk & BIT(0);
- break;
- case 4:
- case 5:
- wmmps_ac = psta->uapsd_vi & BIT(0);
- break;
- case 6:
- case 7:
- wmmps_ac = psta->uapsd_vo & BIT(0);
- break;
- case 0:
- case 3:
- default:
- wmmps_ac = psta->uapsd_be & BIT(0);
- break;
- }
-
- if (wmmps_ac)
- return;
-
- if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
- psta->expire_to = pstapriv->expire_to;
- psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
- }
-
- if ((psta->state & WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap & BIT(psta->aid))) {
- struct list_head *xmitframe_plist, *xmitframe_phead;
- struct xmit_frame *pxmitframe = NULL;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- spin_lock_bh(&pxmitpriv->lock);
-
- xmitframe_phead = get_list_head(&psta->sleep_q);
- xmitframe_plist = xmitframe_phead->next;
-
- if (xmitframe_phead != xmitframe_plist) {
- pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
-
- xmitframe_plist = xmitframe_plist->next;
-
- list_del_init(&pxmitframe->list);
-
- psta->sleepq_len--;
-
- if (psta->sleepq_len > 0)
- pxmitframe->attrib.mdata = 1;
- else
- pxmitframe->attrib.mdata = 0;
-
- pxmitframe->attrib.triggered = 1;
-
- if (psta->sleepq_len == 0) {
- pstapriv->tim_bitmap &= ~BIT(psta->aid);
-
- /* update BCN for TIM IE */
- /* update_BCNTIM(padapter); */
- update_beacon(padapter, _TIM_IE_, NULL, false);
- }
- } else {
- if (pstapriv->tim_bitmap & BIT(psta->aid)) {
- if (psta->sleepq_len == 0)
- /* issue nulldata with More data bit = 0 to indicate we have no buffered packets */
- issue_nulldata(padapter, psta->hwaddr, 0, 0, 0);
- else
- psta->sleepq_len = 0;
-
- pstapriv->tim_bitmap &= ~BIT(psta->aid);
-
- /* update BCN for TIM IE */
- /* update_BCNTIM(padapter); */
- update_beacon(padapter, _TIM_IE_, NULL, false);
- }
- }
- spin_unlock_bh(&pxmitpriv->lock);
- }
-}
-
-struct recv_frame *recvframe_chk_defrag(struct adapter *padapter, struct recv_frame *precv_frame);
-
-static void validate_recv_mgnt_frame(struct adapter *padapter,
- struct recv_frame *precv_frame)
-{
- struct sta_info *psta;
- struct ieee80211_hdr *hdr;
-
- precv_frame = recvframe_chk_defrag(padapter, precv_frame);
- if (!precv_frame)
- return;
-
- hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
- psta = rtw_get_stainfo(&padapter->stapriv, hdr->addr2);
- if (psta) {
- psta->sta_stats.rx_mgnt_pkts++;
- if (ieee80211_is_beacon(hdr->frame_control))
- psta->sta_stats.rx_beacon_pkts++;
- else if (ieee80211_is_probe_req(hdr->frame_control))
- psta->sta_stats.rx_probereq_pkts++;
- else if (ieee80211_is_probe_resp(hdr->frame_control)) {
- if (!memcmp(padapter->eeprompriv.mac_addr, hdr->addr1, ETH_ALEN))
- psta->sta_stats.rx_probersp_pkts++;
- else if (is_broadcast_mac_addr(hdr->addr1) || is_multicast_mac_addr(hdr->addr1))
- psta->sta_stats.rx_probersp_bm_pkts++;
- else
- psta->sta_stats.rx_probersp_uo_pkts++;
- }
- }
-
- mgt_dispatcher(padapter, precv_frame);
-}
-
-static int validate_recv_data_frame(struct adapter *adapter,
- struct recv_frame *precv_frame)
-{
- struct sta_info *psta = NULL;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
- struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
- struct security_priv *psecuritypriv = &adapter->securitypriv;
- int ret;
-
- memcpy(pattrib->dst, ieee80211_get_DA(hdr), ETH_ALEN);
- memcpy(pattrib->src, ieee80211_get_SA(hdr), ETH_ALEN);
-
- /* address4 is used only if both to_ds and from_ds are set */
- if (ieee80211_has_a4(hdr->frame_control))
- return _FAIL;
-
- memcpy(pattrib->ra, hdr->addr1, ETH_ALEN);
- memcpy(pattrib->ta, hdr->addr2, ETH_ALEN);
-
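-	/* Dispatch on the ToDS/FromDS bits: FromDS = AP-to-STA, ToDS = STA-to-AP, neither = STA-to-STA/IBSS. */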
- if (ieee80211_has_fromds(hdr->frame_control)) {
- memcpy(pattrib->bssid, hdr->addr2, ETH_ALEN);
- ret = ap2sta_data_frame(adapter, precv_frame, &psta);
- } else if (ieee80211_has_tods(hdr->frame_control)) {
- memcpy(pattrib->bssid, hdr->addr1, ETH_ALEN);
- ret = sta2ap_data_frame(adapter, precv_frame, &psta);
- } else {
- memcpy(pattrib->bssid, hdr->addr3, ETH_ALEN);
- ret = sta2sta_data_frame(adapter, precv_frame, &psta);
- }
-
- if (ret == _FAIL || ret == RTW_RX_HANDLED)
- return ret;
-
- if (!psta)
- return _FAIL;
-
- precv_frame->psta = psta;
-
- pattrib->amsdu = 0;
- pattrib->ack_policy = 0;
- /* parsing QC field */
- if (pattrib->qos) {
- struct ieee80211_qos_hdr *qos_hdr = (struct ieee80211_qos_hdr *)hdr;
-
- pattrib->priority = ieee80211_get_tid(hdr);
- pattrib->ack_policy = GetAckpolicy(&qos_hdr->qos_ctrl);
- pattrib->amsdu = GetAMsdu(&qos_hdr->qos_ctrl);
- pattrib->hdrlen = sizeof(*qos_hdr);
-
- if (pattrib->priority != 0 && pattrib->priority != 3)
- adapter->recvpriv.bIsAnyNonBEPkts = true;
- } else {
- pattrib->priority = 0;
- pattrib->hdrlen = 24;
- }
-
- if (pattrib->order)/* HT-CTRL 11n */
- pattrib->hdrlen += 4;
-
- precv_frame->preorder_ctrl = &psta->recvreorder_ctrl[pattrib->priority];
-
- /* decache, drop duplicate recv packets */
- if (recv_decache(precv_frame, ieee80211_has_retry(hdr->frame_control),
- &psta->sta_recvpriv.rxcache) == _FAIL)
- return _FAIL;
-
- if (pattrib->privacy) {
- GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, is_multicast_ether_addr(pattrib->ra));
-
- SET_ICE_IV_LEN(pattrib->iv_len, pattrib->icv_len, pattrib->encrypt);
- } else {
- pattrib->encrypt = 0;
- pattrib->iv_len = 0;
- pattrib->icv_len = 0;
- }
-
- return _SUCCESS;
-}
-
-static int validate_recv_frame(struct adapter *adapter, struct recv_frame *precv_frame)
-{
-	/* check the frame subtype, to/from DS bits, DA and BSSID, */
-
-	/* then check whether the rx seq/frag is a duplicate */
-
- int retval = _FAIL;
- struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
- struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
-
- if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
- int ch_set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, rtw_get_oper_ch(adapter));
-
- if (ch_set_idx >= 0)
- pmlmeext->channel_set[ch_set_idx].rx_count++;
- }
-
- if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_VERS)) != 0)
- return _FAIL;
-
- pattrib->frag_num = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
- pattrib->seq_num = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
-
- pattrib->pw_save = ieee80211_has_pm(hdr->frame_control);
- pattrib->mfrag = ieee80211_has_morefrags(hdr->frame_control);
- pattrib->mdata = ieee80211_has_moredata(hdr->frame_control);
- pattrib->privacy = ieee80211_has_protected(hdr->frame_control);
- pattrib->order = ieee80211_has_order(hdr->frame_control);
-
- /* We return _SUCCESS only for data frames. */
- if (ieee80211_is_mgmt(hdr->frame_control))
- validate_recv_mgnt_frame(adapter, precv_frame);
- else if (ieee80211_is_ctl(hdr->frame_control))
- validate_recv_ctrl_frame(adapter, precv_frame);
- else if (ieee80211_is_data(hdr->frame_control)) {
- rtw_led_control(adapter, LED_CTL_RX);
- pattrib->qos = ieee80211_is_data_qos(hdr->frame_control);
- retval = validate_recv_data_frame(adapter, precv_frame);
- if (retval == _FAIL) {
- struct recv_priv *precvpriv = &adapter->recvpriv;
-
- precvpriv->rx_drop++;
- }
- }
-
- return retval;
-}
-
-/* remove the wlanhdr and add the eth_hdr */
-
-static int wlanhdr_to_ethhdr(struct recv_frame *precvframe)
-{
- int rmv_len;
- u16 eth_type, len;
- __be16 be_tmp;
- u8 bsnaphdr;
- u8 *psnap_type;
- struct ieee80211_snap_hdr *psnap;
-
- int ret = _SUCCESS;
- struct adapter *adapter = precvframe->adapter;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
-
- u8 *ptr = precvframe->rx_data; /* point to frame_ctrl field */
- struct rx_pkt_attrib *pattrib = &precvframe->attrib;
-
- if (pattrib->encrypt)
- recvframe_pull_tail(precvframe, pattrib->icv_len);
-
- psnap = (struct ieee80211_snap_hdr *)(ptr + pattrib->hdrlen + pattrib->iv_len);
- psnap_type = ptr + pattrib->hdrlen + pattrib->iv_len + SNAP_SIZE;
- /* convert hdr + possible LLC headers into Ethernet header */
- if ((!memcmp(psnap, rtw_rfc1042_header, SNAP_SIZE) &&
- memcmp(psnap_type, SNAP_ETH_TYPE_IPX, 2) &&
- memcmp(psnap_type, SNAP_ETH_TYPE_APPLETALK_AARP, 2)) ||
- !memcmp(psnap, rtw_bridge_tunnel_header, SNAP_SIZE)) {
- /* remove RFC1042 or Bridge-Tunnel encapsulation and replace EtherType */
- bsnaphdr = true;
- } else {
- /* Leave Ethernet header part of hdr and full payload */
- bsnaphdr = false;
- }
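-	/*
-	 * When bsnaphdr is true, the RFC 1042 / Bridge-Tunnel LLC+SNAP header
-	 * sits between the 802.11 header (+ IV) and the EtherType, so it is
-	 * stripped together with the 802.11 header below; otherwise an 802.3
-	 * length field is rebuilt instead.
-	 */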
-
- rmv_len = pattrib->hdrlen + pattrib->iv_len + (bsnaphdr ? SNAP_SIZE : 0);
- len = precvframe->len - rmv_len;
-
- memcpy(&be_tmp, ptr + rmv_len, 2);
- eth_type = ntohs(be_tmp); /* pattrib->ether_type */
- pattrib->eth_type = eth_type;
-
- if ((check_fwstate(pmlmepriv, WIFI_MP_STATE))) {
- ptr += rmv_len;
- *ptr = 0x87;
- *(ptr + 1) = 0x12;
-
- eth_type = 0x8712;
- /* append rx status for mp test packets */
- ptr = recvframe_pull(precvframe, (rmv_len - sizeof(struct ethhdr) + 2) - 24);
- if (!ptr)
- return _FAIL;
- memcpy(ptr, get_rxmem(precvframe), 24);
- ptr += 24;
- } else {
- ptr = recvframe_pull(precvframe, (rmv_len - sizeof(struct ethhdr) + (bsnaphdr ? 2 : 0)));
- if (!ptr)
- return _FAIL;
- }
-
- memcpy(ptr, pattrib->dst, ETH_ALEN);
- memcpy(ptr + ETH_ALEN, pattrib->src, ETH_ALEN);
-
- if (!bsnaphdr) {
- be_tmp = htons(len);
- memcpy(ptr + 12, &be_tmp, 2);
- }
-
- return ret;
-}
-
-/* perform defrag */
-static struct recv_frame *recvframe_defrag(struct adapter *adapter, struct __queue *defrag_q)
-{
- struct list_head *plist, *phead;
- u8 wlanhdr_offset;
- u8 curfragnum;
- struct recv_frame *pfhdr, *pnfhdr;
- struct recv_frame *prframe, *pnextrframe;
- struct __queue *pfree_recv_queue;
-
- curfragnum = 0;
- pfree_recv_queue = &adapter->recvpriv.free_recv_queue;
-
- phead = get_list_head(defrag_q);
- plist = phead->next;
- pfhdr = container_of(plist, struct recv_frame, list);
- prframe = (struct recv_frame *)pfhdr;
- list_del_init(&prframe->list);
-
- if (curfragnum != pfhdr->attrib.frag_num) {
- /* the first fragment number must be 0 */
- /* free the whole queue */
- rtw_free_recvframe(prframe, pfree_recv_queue);
- rtw_free_recvframe_queue(defrag_q, pfree_recv_queue);
-
- return NULL;
- }
-
- curfragnum++;
-
- plist = get_list_head(defrag_q);
- plist = phead->next;
- pfhdr = container_of(plist, struct recv_frame, list);
- prframe = (struct recv_frame *)pfhdr;
- list_del_init(&prframe->list);
-
- plist = plist->next;
-
- while (phead != plist) {
- pnfhdr = container_of(plist, struct recv_frame, list);
- pnextrframe = (struct recv_frame *)pnfhdr;
-
- /* check the fragment sequence (2nd ~n fragment frame) */
-
- if (curfragnum != pnfhdr->attrib.frag_num) {
- /* the fragment number must be increasing (after decache) */
- /* release the defrag_q & prframe */
- rtw_free_recvframe(prframe, pfree_recv_queue);
- rtw_free_recvframe_queue(defrag_q, pfree_recv_queue);
- return NULL;
- }
-
- curfragnum++;
-
- /* copy the 2nd~n fragment frame's payload to the first fragment */
- /* get the 2nd~last fragment frame's payload */
-
- wlanhdr_offset = pnfhdr->attrib.hdrlen + pnfhdr->attrib.iv_len;
-
- recvframe_pull(pnextrframe, wlanhdr_offset);
-
- /* append to first fragment frame's tail (if privacy frame, pull the ICV) */
- recvframe_pull_tail(prframe, pfhdr->attrib.icv_len);
-
- /* memcpy */
- memcpy(pfhdr->rx_tail, pnfhdr->rx_data, pnfhdr->len);
-
- recvframe_put(prframe, pnfhdr->len);
-
- pfhdr->attrib.icv_len = pnfhdr->attrib.icv_len;
- plist = plist->next;
- }
-
- /* free the defrag_q queue and return the prframe */
- rtw_free_recvframe_queue(defrag_q, pfree_recv_queue);
-
- return prframe;
-}
-
-/* check if defragmentation is needed; if so, queue the frame to defrag_q */
-struct recv_frame *recvframe_chk_defrag(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- u8 ismfrag;
- u8 fragnum;
- u8 *psta_addr;
- struct recv_frame *pfhdr;
- struct sta_info *psta;
- struct sta_priv *pstapriv;
- struct list_head *phead;
- struct recv_frame *prtnframe = NULL;
- struct __queue *pfree_recv_queue, *pdefrag_q;
-
- pstapriv = &padapter->stapriv;
-
- pfhdr = precv_frame;
-
- pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
-
- /* need to define struct of wlan header frame ctrl */
- ismfrag = pfhdr->attrib.mfrag;
- fragnum = pfhdr->attrib.frag_num;
-
- psta_addr = pfhdr->attrib.ta;
- psta = rtw_get_stainfo(pstapriv, psta_addr);
- if (!psta) {
- __le16 fc = *(__le16 *)pfhdr->rx_data;
-
- if (ieee80211_is_data(fc)) {
- psta = rtw_get_bcmc_stainfo(padapter);
- pdefrag_q = &psta->sta_recvpriv.defrag_q;
- } else {
- pdefrag_q = NULL;
- }
- } else {
- pdefrag_q = &psta->sta_recvpriv.defrag_q;
- }
-
- if ((ismfrag == 0) && (fragnum == 0))
- prtnframe = precv_frame;/* isn't a fragment frame */
-
- if (ismfrag == 1) {
- /* 0~(n-1) fragment frame */
-		/* enqueue to defrag_q */
- if (pdefrag_q) {
- if (fragnum == 0) {
- /* the first fragment */
- if (!list_empty(&pdefrag_q->queue)) {
- /* free current defrag_q */
- rtw_free_recvframe_queue(pdefrag_q, pfree_recv_queue);
- }
- }
-
- /* Then enqueue the 0~(n-1) fragment into the defrag_q */
-
- phead = get_list_head(pdefrag_q);
- list_add_tail(&pfhdr->list, phead);
-
- prtnframe = NULL;
- } else {
- /* can't find this ta's defrag_queue, so free this recv_frame */
- if (precv_frame && pfree_recv_queue)
- rtw_free_recvframe(precv_frame, pfree_recv_queue);
- prtnframe = NULL;
- }
- }
-
- if ((ismfrag == 0) && (fragnum != 0)) {
- /* the last fragment frame */
- /* enqueue the last fragment */
- if (pdefrag_q) {
- phead = get_list_head(pdefrag_q);
- list_add_tail(&pfhdr->list, phead);
-
- /* call recvframe_defrag to defrag */
- precv_frame = recvframe_defrag(padapter, pdefrag_q);
- prtnframe = precv_frame;
- } else {
- /* can't find this ta's defrag_queue, so free this recv_frame */
- if (precv_frame && pfree_recv_queue)
- rtw_free_recvframe(precv_frame, pfree_recv_queue);
- prtnframe = NULL;
- }
- }
-
- if (prtnframe && prtnframe->attrib.privacy) {
- /* after defrag we must check tkip mic code */
- if (recvframe_chkmic(padapter, prtnframe) == _FAIL) {
- if (precv_frame && pfree_recv_queue)
- rtw_free_recvframe(prtnframe, pfree_recv_queue);
- prtnframe = NULL;
- }
- }
-
- return prtnframe;
-}
-
-static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
-{
- int a_len, padding_len;
- u16 eth_type, nSubframe_Length;
- u8 nr_subframes, i;
- unsigned char *pdata;
- struct rx_pkt_attrib *pattrib;
- struct sk_buff *sub_skb, *subframes[MAX_SUBFRAME_COUNT];
-
- struct recv_priv *precvpriv = &padapter->recvpriv;
- struct __queue *pfree_recv_queue = &precvpriv->free_recv_queue;
-
- nr_subframes = 0;
-
- pattrib = &prframe->attrib;
-
- recvframe_pull(prframe, prframe->attrib.hdrlen);
-
- if (prframe->attrib.iv_len > 0)
- recvframe_pull(prframe, prframe->attrib.iv_len);
-
- a_len = prframe->len;
-
- pdata = prframe->rx_data;
-
- while (a_len > ETH_HLEN) {
-		/* the 2-byte subframe length follows the two 6-byte MAC addresses (offset 12) */
- nSubframe_Length = RTW_GET_BE16(pdata + 12);
-
- if (a_len < ETH_HLEN + nSubframe_Length)
- goto exit;
-
-		/* move the data pointer to the payload */
- pdata += ETH_HLEN;
- a_len -= ETH_HLEN;
-
- /* Allocate new skb for releasing to upper layer */
- sub_skb = dev_alloc_skb(nSubframe_Length + 12);
- if (sub_skb) {
- skb_reserve(sub_skb, 12);
- skb_put_data(sub_skb, pdata, nSubframe_Length);
- } else {
- sub_skb = skb_clone(prframe->pkt, GFP_ATOMIC);
- if (sub_skb) {
- sub_skb->data = pdata;
- sub_skb->len = nSubframe_Length;
- skb_set_tail_pointer(sub_skb, nSubframe_Length);
- } else {
- break;
- }
- }
-
- subframes[nr_subframes++] = sub_skb;
-
- if (nr_subframes >= MAX_SUBFRAME_COUNT)
- break;
-
- pdata += nSubframe_Length;
- a_len -= nSubframe_Length;
- if (a_len != 0) {
- padding_len = 4 - ((nSubframe_Length + ETH_HLEN) & (4 - 1));
- if (padding_len == 4)
- padding_len = 0;
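-			/*
-			 * Illustration: subframes are padded to a 4-byte boundary,
-			 * e.g. a 50-byte subframe (50 + 14 = 64) needs no padding,
-			 * while a 51-byte subframe (51 + 14 = 65) needs 3 bytes.
-			 */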
-
- if (a_len < padding_len)
- goto exit;
-
- pdata += padding_len;
- a_len -= padding_len;
- }
- }
-
- for (i = 0; i < nr_subframes; i++) {
- sub_skb = subframes[i];
- /* convert hdr + possible LLC headers into Ethernet header */
- eth_type = RTW_GET_BE16(&sub_skb->data[6]);
- if (sub_skb->len >= 8 &&
- ((!memcmp(sub_skb->data, rtw_rfc1042_header, SNAP_SIZE) &&
- eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
- !memcmp(sub_skb->data, rtw_bridge_tunnel_header, SNAP_SIZE))) {
- /* remove RFC1042 or Bridge-Tunnel encapsulation and replace EtherType */
- skb_pull(sub_skb, SNAP_SIZE);
- memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src, ETH_ALEN);
- memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst, ETH_ALEN);
- } else {
- __be16 len;
- /* Leave Ethernet header part of hdr and full payload */
- len = htons(sub_skb->len);
- memcpy(skb_push(sub_skb, 2), &len, 2);
- memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src, ETH_ALEN);
- memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst, ETH_ALEN);
- }
-
- /* Indicate the packets to upper layer */
- /* Insert NAT2.5 RX here! */
- sub_skb->protocol = eth_type_trans(sub_skb, padapter->pnetdev);
- sub_skb->dev = padapter->pnetdev;
-
- sub_skb->ip_summed = CHECKSUM_NONE;
-
- netif_rx(sub_skb);
- }
-
-exit:
-
- prframe->len = 0;
- rtw_free_recvframe(prframe, pfree_recv_queue);/* free this recv_frame */
-
- return _SUCCESS;
-}
-
-static bool check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_num)
-{
- u8 wsize = preorder_ctrl->wsize_b;
- u16 wend = (preorder_ctrl->indicate_seq + wsize - 1) & 0xFFF;/* 4096; */
-
- /* Rx Reorder initialize condition. */
- if (preorder_ctrl->indicate_seq == 0xFFFF)
- preorder_ctrl->indicate_seq = seq_num;
-
-	/* Drop packets whose SeqNum is smaller than WinStart */
- if (SN_LESS(seq_num, preorder_ctrl->indicate_seq))
- return false;
-
- /* */
- /* Sliding window manipulation. Conditions includes: */
-	/* 1. Incoming SeqNum is equal to WinStart => Window shift 1 */
- /* 2. Incoming SeqNum is larger than the WinEnd => Window shift N */
- /* */
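-	/*
-	 * Example, assuming wsize = 64 and indicate_seq = 100 (wend = 163):
-	 *   seq_num =  99 -> dropped above (older than WinStart)
-	 *   seq_num = 100 -> window shifts by one, indicate_seq becomes 101
-	 *   seq_num = 170 -> beyond WinEnd, indicate_seq becomes 170 + 1 - 64 = 107
-	 */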
- if (SN_EQUAL(seq_num, preorder_ctrl->indicate_seq)) {
- preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) & 0xFFF;
- } else if (SN_LESS(wend, seq_num)) {
- if (seq_num >= (wsize - 1))
- preorder_ctrl->indicate_seq = seq_num + 1 - wsize;
- else
- preorder_ctrl->indicate_seq = 0xFFF - (wsize - (seq_num + 1)) + 1;
- }
-
- return true;
-}
-
-static bool enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl, struct recv_frame *prframe)
-{
- struct rx_pkt_attrib *pattrib = &prframe->attrib;
- struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
- struct list_head *phead, *plist;
- struct recv_frame *hdr;
- struct rx_pkt_attrib *pnextattrib;
-
- phead = get_list_head(ppending_recvframe_queue);
- plist = phead->next;
-
- while (phead != plist) {
- hdr = container_of(plist, struct recv_frame, list);
- pnextattrib = &hdr->attrib;
-
- if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num))
- plist = plist->next;
- else if (SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num))
- return false;
- else
- break;
- }
-
- list_del_init(&prframe->list);
-
- list_add_tail(&prframe->list, plist);
- return true;
-}
-
-static int rtw_recv_indicatepkt(struct adapter *padapter, struct recv_frame *precv_frame)
-{
- struct recv_priv *precvpriv;
- struct __queue *pfree_recv_queue;
- struct sk_buff *skb;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- precvpriv = &padapter->recvpriv;
- pfree_recv_queue = &precvpriv->free_recv_queue;
-
- skb = precv_frame->pkt;
- if (!skb)
- goto _recv_indicatepkt_drop;
-
- skb->data = precv_frame->rx_data;
-
- skb_set_tail_pointer(skb, precv_frame->len);
-
- skb->len = precv_frame->len;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- struct sk_buff *pskb2 = NULL;
- struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
- bool bmcast = is_multicast_ether_addr(pattrib->dst);
-
- if (memcmp(pattrib->dst, myid(&padapter->eeprompriv), ETH_ALEN)) {
- if (bmcast) {
- psta = rtw_get_bcmc_stainfo(padapter);
- pskb2 = skb_clone(skb, GFP_ATOMIC);
- } else {
- psta = rtw_get_stainfo(pstapriv, pattrib->dst);
- }
-
- if (psta) {
- struct net_device *pnetdev;
-
- pnetdev = (struct net_device *)padapter->pnetdev;
- skb->dev = pnetdev;
- skb_set_queue_mapping(skb, rtw_recv_select_queue(skb));
-
- rtw_xmit_entry(skb, pnetdev);
-
- if (bmcast)
- skb = pskb2;
- else
- goto _recv_indicatepkt_end;
- }
- }
- }
-
- rcu_read_lock();
- rcu_dereference(padapter->pnetdev->rx_handler_data);
- rcu_read_unlock();
-
- skb->ip_summed = CHECKSUM_NONE;
- skb->dev = padapter->pnetdev;
- skb->protocol = eth_type_trans(skb, padapter->pnetdev);
-
- netif_rx(skb);
-
-_recv_indicatepkt_end:
-
-	/* set the skb pointer (pkt) to NULL before rtw_free_recvframe() */
- precv_frame->pkt = NULL;
-
- rtw_free_recvframe(precv_frame, pfree_recv_queue);
-
- return _SUCCESS;
-
-_recv_indicatepkt_drop:
-
- /* enqueue back to free_recv_queue */
- rtw_free_recvframe(precv_frame, pfree_recv_queue);
-
- return _FAIL;
-}
-
-static bool recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reorder_ctrl *preorder_ctrl, int bforced)
-{
- struct list_head *phead, *plist;
- struct recv_frame *prframe;
- struct rx_pkt_attrib *pattrib;
- int bPktInBuf = false;
- struct recv_priv *precvpriv = &padapter->recvpriv;
- struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
-
- phead = get_list_head(ppending_recvframe_queue);
- plist = phead->next;
-
- /* Handling some condition for forced indicate case. */
- if (bforced) {
- if (list_empty(phead))
- return true;
-
- prframe = container_of(plist, struct recv_frame, list);
- pattrib = &prframe->attrib;
- preorder_ctrl->indicate_seq = pattrib->seq_num;
- }
-
- /* Prepare indication list and indication. */
- /* Check if there is any packet need indicate. */
- while (!list_empty(phead)) {
- prframe = container_of(plist, struct recv_frame, list);
- pattrib = &prframe->attrib;
-
- if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
- plist = plist->next;
- list_del_init(&prframe->list);
-
- if (SN_EQUAL(preorder_ctrl->indicate_seq, pattrib->seq_num))
- preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) & 0xFFF;
-
- /* Set this as a lock to make sure that only one thread is indicating packet. */
-
- /* indicate this recv_frame */
- if (!pattrib->amsdu) {
- if ((!padapter->bDriverStopped) &&
- (!padapter->bSurpriseRemoved))
- rtw_recv_indicatepkt(padapter, prframe);/* indicate this recv_frame */
- } else if (pattrib->amsdu == 1) {
- if (amsdu_to_msdu(padapter, prframe) != _SUCCESS)
- rtw_free_recvframe(prframe, &precvpriv->free_recv_queue);
- } else {
- /* error condition; */
- }
-
- /* Update local variables. */
- bPktInBuf = false;
- } else {
- bPktInBuf = true;
- break;
- }
- }
- return bPktInBuf;
-}
-
-static int recv_indicatepkt_reorder(struct adapter *padapter, struct recv_frame *prframe)
-{
- int retval = _SUCCESS;
- struct rx_pkt_attrib *pattrib = &prframe->attrib;
- struct recv_reorder_ctrl *preorder_ctrl = prframe->preorder_ctrl;
- struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
-
- if (!pattrib->amsdu) {
- /* s1. */
- wlanhdr_to_ethhdr(prframe);
-
- if (!pattrib->qos) {
- if (!padapter->bDriverStopped &&
- !padapter->bSurpriseRemoved) {
- rtw_recv_indicatepkt(padapter, prframe);
- return _SUCCESS;
- }
-
- return _FAIL;
- }
-
- if (!preorder_ctrl->enable) {
- /* indicate this recv_frame */
- preorder_ctrl->indicate_seq = pattrib->seq_num;
- rtw_recv_indicatepkt(padapter, prframe);
-
- preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) % 4096;
- return _SUCCESS;
- }
-	} else if (pattrib->amsdu == 1) { /* temporary filter: A-MSDU within an A-MPDU is not supported */
- if (!preorder_ctrl->enable) {
- preorder_ctrl->indicate_seq = pattrib->seq_num;
- retval = amsdu_to_msdu(padapter, prframe);
-
- preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) % 4096;
- return retval;
- }
- }
-
- spin_lock_bh(&ppending_recvframe_queue->lock);
-
-	/* s2. check if winstart_b(indicate_seq) needs to be updated */
- if (!check_indicate_seq(preorder_ctrl, pattrib->seq_num))
- goto _err_exit;
-
-	/* s3. Insert all packets into the Reorder Queue to maintain ordering. */
- if (!enqueue_reorder_recvframe(preorder_ctrl, prframe))
- goto _err_exit;
-
- /* s4. */
- /* Indication process. */
- /* After Packet dropping and Sliding Window shifting as above, we can now just indicate the packets */
- /* with the SeqNum smaller than latest WinStart and buffer other packets. */
- /* */
- /* For Rx Reorder condition: */
- /* 1. All packets with SeqNum smaller than WinStart => Indicate */
- /* 2. All packets with SeqNum larger than or equal to WinStart => Buffer it. */
- /* */
-
- /* recv_indicatepkts_in_order(padapter, preorder_ctrl, true); */
- if (recv_indicatepkts_in_order(padapter, preorder_ctrl, false)) {
- _set_timer(&preorder_ctrl->reordering_ctrl_timer, REORDER_WAIT_TIME);
- spin_unlock_bh(&ppending_recvframe_queue->lock);
- } else {
- spin_unlock_bh(&ppending_recvframe_queue->lock);
- _cancel_timer_ex(&preorder_ctrl->reordering_ctrl_timer);
- }
-
- return _SUCCESS;
-
-_err_exit:
-
- spin_unlock_bh(&ppending_recvframe_queue->lock);
-
- return _FAIL;
-}
-
-void rtw_reordering_ctrl_timeout_handler(void *pcontext)
-{
- struct recv_reorder_ctrl *preorder_ctrl = (struct recv_reorder_ctrl *)pcontext;
- struct adapter *padapter = preorder_ctrl->padapter;
- struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
-
- if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
- return;
-
- spin_lock_bh(&ppending_recvframe_queue->lock);
-
- if (recv_indicatepkts_in_order(padapter, preorder_ctrl, true))
- _set_timer(&preorder_ctrl->reordering_ctrl_timer, REORDER_WAIT_TIME);
-
- spin_unlock_bh(&ppending_recvframe_queue->lock);
-}
-
-static int process_recv_indicatepkts(struct adapter *padapter, struct recv_frame *prframe)
-{
- int retval = _SUCCESS;
- /* struct recv_priv *precvpriv = &padapter->recvpriv; */
- /* struct rx_pkt_attrib *pattrib = &prframe->attrib; */
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct ht_priv *phtpriv = &pmlmepriv->htpriv;
-
- if (phtpriv->ht_option) { /* B/G/N Mode */
- /* prframe->preorder_ctrl = &precvpriv->recvreorder_ctrl[pattrib->priority]; */
-
- if (recv_indicatepkt_reorder(padapter, prframe) != _SUCCESS) {
- /* including perform A-MPDU Rx Ordering Buffer Control */
- if ((!padapter->bDriverStopped) &&
- (!padapter->bSurpriseRemoved)) {
- retval = _FAIL;
- return retval;
- }
- }
- } else { /* B/G mode */
- retval = wlanhdr_to_ethhdr(prframe);
- if (retval != _SUCCESS)
- return retval;
-
- if ((!padapter->bDriverStopped) &&
- (!padapter->bSurpriseRemoved)) {
- /* indicate this recv_frame */
- rtw_recv_indicatepkt(padapter, prframe);
- } else {
- retval = _FAIL;
- return retval;
- }
- }
-
- return retval;
-}
-
-static int recv_func_prehandle(struct adapter *padapter, struct recv_frame *rframe)
-{
- int ret = _SUCCESS;
- struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
-
-	/* check the frame ctrl field and decache */
- ret = validate_recv_frame(padapter, rframe);
- if (ret != _SUCCESS)
- rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */
-
- return ret;
-}
-
-static int recv_func_posthandle(struct adapter *padapter, struct recv_frame *prframe)
-{
- int ret = _SUCCESS;
- struct recv_frame *orig_prframe = prframe;
- struct recv_priv *precvpriv = &padapter->recvpriv;
- struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
-
- /* DATA FRAME */
- rtw_led_control(padapter, LED_CTL_RX);
-
- prframe = decryptor(padapter, prframe);
- if (!prframe) {
- ret = _FAIL;
- goto _recv_data_drop;
- }
-
- prframe = recvframe_chk_defrag(padapter, prframe);
- if (!prframe)
- goto _recv_data_drop;
-
- prframe = portctrl(padapter, prframe);
- if (!prframe) {
- ret = _FAIL;
- goto _recv_data_drop;
- }
-
- count_rx_stats(padapter, prframe, NULL);
-
- ret = process_recv_indicatepkts(padapter, prframe);
- if (ret != _SUCCESS) {
- rtw_free_recvframe(orig_prframe, pfree_recv_queue);/* free this recv_frame */
- goto _recv_data_drop;
- }
- return ret;
-
-_recv_data_drop:
- precvpriv->rx_drop++;
- return ret;
-}
-
-static int recv_func(struct adapter *padapter, struct recv_frame *rframe)
-{
- int ret;
- struct rx_pkt_attrib *prxattrib = &rframe->attrib;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct mlme_priv *mlmepriv = &padapter->mlmepriv;
- struct recv_priv *recvpriv = &padapter->recvpriv;
-
-	/* check if the uc_swdec_pending_queue needs to be handled */
- if (check_fwstate(mlmepriv, WIFI_STATION_STATE) &&
- psecuritypriv->busetkipkey) {
- struct recv_frame *pending_frame;
-
- while ((pending_frame = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue)))
- recv_func_posthandle(padapter, pending_frame);
- }
-
- ret = recv_func_prehandle(padapter, rframe);
-
- if (ret == _SUCCESS) {
-		/* check if the frame needs to be enqueued into uc_swdec_pending_queue */
- if (check_fwstate(mlmepriv, WIFI_STATION_STATE) &&
- !is_multicast_ether_addr(prxattrib->ra) && prxattrib->encrypt > 0 &&
- (prxattrib->bdecrypted == 0 || psecuritypriv->sw_decrypt) &&
- psecuritypriv->ndisauthtype == Ndis802_11AuthModeWPAPSK &&
- !psecuritypriv->busetkipkey) {
- rtw_enqueue_recvframe(rframe, &padapter->recvpriv.uc_swdec_pending_queue);
- if (recvpriv->free_recvframe_cnt < NR_RECVFRAME / 4) {
-				/*
-				 * To prevent recv_frame starvation, take a frame from
-				 * uc_swdec_pending_queue and process it when
-				 * free_recvframe_cnt runs low.
-				 */
- rframe = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue);
- if (rframe)
- goto do_posthandle;
- }
- goto exit;
- }
-do_posthandle:
- ret = recv_func_posthandle(padapter, rframe);
- }
-
-exit:
- return ret;
-}
-
-s32 rtw_recv_entry(struct recv_frame *precvframe)
-{
- struct adapter *padapter;
- struct recv_priv *precvpriv;
- s32 ret = _SUCCESS;
-
- padapter = precvframe->adapter;
-
- precvpriv = &padapter->recvpriv;
-
- ret = recv_func(padapter, precvframe);
- if (ret == _FAIL)
- goto _recv_entry_drop;
-
- precvpriv->rx_pkts++;
-
- return ret;
-
-_recv_entry_drop:
-
- return ret;
-}
-
-static void rtw_signal_stat_timer_hdl(struct timer_list *t)
-{
- struct adapter *adapter = from_timer(adapter, t, recvpriv.signal_stat_timer);
- struct recv_priv *recvpriv = &adapter->recvpriv;
-
- u32 tmp_s, tmp_q;
- u8 avg_signal_strength = 0;
- u8 avg_signal_qual = 0;
- u8 _alpha = 3; /* this value is based on converging_constant = 5000 and sampling_interval = 1000 */
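-	/*
-	 * signal_strength and signal_qual below are smoothed with a rounded-up
-	 * moving average: new = (sample + (_alpha - 1) * old) / _alpha,
-	 * then clamped to 100.
-	 */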
-
- if (adapter->recvpriv.is_signal_dbg) {
- /* update the user specific value, signal_strength_dbg, to signal_strength, rssi */
- adapter->recvpriv.signal_strength = adapter->recvpriv.signal_strength_dbg;
- adapter->recvpriv.rssi = (s8)translate_percentage_to_dbm((u8)adapter->recvpriv.signal_strength_dbg);
- } else {
- if (recvpriv->signal_strength_data.update_req == 0) {/* update_req is clear, means we got rx */
- avg_signal_strength = recvpriv->signal_strength_data.avg_val;
- /* after avg_vals are acquired, we can re-stat the signal values */
- recvpriv->signal_strength_data.update_req = 1;
- }
-
- if (recvpriv->signal_qual_data.update_req == 0) {/* update_req is clear, means we got rx */
- avg_signal_qual = recvpriv->signal_qual_data.avg_val;
- /* after avg_vals are acquired, we can re-stat the signal values */
- recvpriv->signal_qual_data.update_req = 1;
- }
-
- /* update value of signal_strength, rssi, signal_qual */
- if (!check_fwstate(&adapter->mlmepriv, _FW_UNDER_SURVEY)) {
- tmp_s = (avg_signal_strength + (_alpha - 1) * recvpriv->signal_strength);
- if (tmp_s % _alpha)
- tmp_s = tmp_s / _alpha + 1;
- else
- tmp_s = tmp_s / _alpha;
- if (tmp_s > 100)
- tmp_s = 100;
-
- tmp_q = (avg_signal_qual + (_alpha - 1) * recvpriv->signal_qual);
- if (tmp_q % _alpha)
- tmp_q = tmp_q / _alpha + 1;
- else
- tmp_q = tmp_q / _alpha;
- if (tmp_q > 100)
- tmp_q = 100;
-
- recvpriv->signal_strength = tmp_s;
- recvpriv->rssi = (s8)translate_percentage_to_dbm(tmp_s);
- recvpriv->signal_qual = tmp_q;
- }
- }
- rtw_set_signal_stat_timer(recvpriv);
-}
diff --git a/drivers/staging/r8188eu/core/rtw_rf.c b/drivers/staging/r8188eu/core/rtw_rf.c
deleted file mode 100644
index 2d2f0fc4c942..000000000000
--- a/drivers/staging/r8188eu/core/rtw_rf.c
+++ /dev/null
@@ -1,29 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#include "../include/drv_types.h"
-
-static const u32 ch_freq_map[] = {
- 2412,
- 2417,
- 2422,
- 2427,
- 2432,
- 2437,
- 2442,
- 2447,
- 2452,
- 2457,
- 2462,
- 2467,
- 2472,
- 2484
-};
-
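-/*
- * Example: rtw_ch2freq(1) == 2412 and rtw_ch2freq(14) == 2484, i.e. the
- * 2.4 GHz channel centre frequencies in MHz; channel 0 or anything beyond
- * the table falls back to 2412.
- */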
-u32 rtw_ch2freq(u32 channel)
-{
- if (channel == 0 || channel > ARRAY_SIZE(ch_freq_map))
- return 2412;
-
- return ch_freq_map[channel - 1];
-}
diff --git a/drivers/staging/r8188eu/core/rtw_security.c b/drivers/staging/r8188eu/core/rtw_security.c
deleted file mode 100644
index 780019ce1b98..000000000000
--- a/drivers/staging/r8188eu/core/rtw_security.c
+++ /dev/null
@@ -1,1374 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _RTW_SECURITY_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/wifi.h"
-#include "../include/osdep_intf.h"
-
-/* WEP related ===== */
-
-/*
-	Fragmentation of the frame must be taken into account
-*/
-void rtw_wep_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
-{ /* exclude ICV */
- union {
- __le32 f0;
- u8 f1[4];
- } crc;
-
- int curfragnum, length;
- u32 keylength;
-
- u8 *pframe, *payload, *iv; /* wepkey */
- u8 wepkey[16];
- u8 hw_hdr_offset = 0;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct arc4_ctx *ctx = &psecuritypriv->xmit_arc4_ctx;
-
- if (!pxmitframe->buf_addr)
- return;
-
- hw_hdr_offset = TXDESC_SIZE + pxmitframe->pkt_offset * PACKET_OFFSET_SZ;
- pframe = pxmitframe->buf_addr + hw_hdr_offset;
-
- /* start to encrypt each fragment */
- if ((pattrib->encrypt == _WEP40_) || (pattrib->encrypt == _WEP104_)) {
- keylength = psecuritypriv->dot11DefKeylen[psecuritypriv->dot11PrivacyKeyIndex];
-
- for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
- iv = pframe + pattrib->hdrlen;
- memcpy(&wepkey[0], iv, 3);
- memcpy(&wepkey[3], &psecuritypriv->dot11DefKey[psecuritypriv->dot11PrivacyKeyIndex].skey[0], keylength);
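-			/* the per-frame RC4 key is the 3-byte IV from the frame
-			 * followed by the 40- or 104-bit default WEP key */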
- payload = pframe + pattrib->iv_len + pattrib->hdrlen;
-
- if ((curfragnum + 1) == pattrib->nr_frags) { /* the last fragment */
- length = pattrib->last_txcmdsz - pattrib->hdrlen - pattrib->iv_len - pattrib->icv_len;
-
- crc.f0 = cpu_to_le32(~crc32_le(~0, payload, length));
-
- arc4_setkey(ctx, wepkey, 3 + keylength);
- arc4_crypt(ctx, payload, payload, length);
- arc4_crypt(ctx, payload + length, crc.f1, 4);
- } else {
- length = pxmitpriv->frag_len - pattrib->hdrlen - pattrib->iv_len - pattrib->icv_len;
- crc.f0 = cpu_to_le32(~crc32_le(~0, payload, length));
- arc4_setkey(ctx, wepkey, 3 + keylength);
- arc4_crypt(ctx, payload, payload, length);
- arc4_crypt(ctx, payload + length, crc.f1, 4);
-
- pframe += pxmitpriv->frag_len;
- pframe = PTR_ALIGN(pframe, 4);
- }
- }
- }
-
-}
-
-void rtw_wep_decrypt(struct adapter *padapter, struct recv_frame *precvframe)
-{
- /* exclude ICV */
- int length;
- u32 keylength;
- u8 *pframe, *payload, *iv, wepkey[16];
- u8 keyindex;
- struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct arc4_ctx *ctx = &psecuritypriv->recv_arc4_ctx;
-
- pframe = precvframe->rx_data;
-
- /* start to decrypt recvframe */
- if ((prxattrib->encrypt == _WEP40_) || (prxattrib->encrypt == _WEP104_)) {
- iv = pframe + prxattrib->hdrlen;
- keyindex = prxattrib->key_index;
- keylength = psecuritypriv->dot11DefKeylen[keyindex];
- memcpy(&wepkey[0], iv, 3);
- memcpy(&wepkey[3], &psecuritypriv->dot11DefKey[keyindex].skey[0], keylength);
- length = precvframe->len - prxattrib->hdrlen - prxattrib->iv_len;
-
- payload = pframe + prxattrib->iv_len + prxattrib->hdrlen;
-
- /* decrypt payload include icv */
- arc4_setkey(ctx, wepkey, 3 + keylength);
- arc4_crypt(ctx, payload, payload, length);
- }
-}
-
-/* 3 ===== TKIP related ===== */
-
-static u32 secmicgetuint32(u8 *p)
-/* Convert from Byte[] to u32 in a portable way */
-{
- s32 i;
- u32 res = 0;
-
- for (i = 0; i < 4; i++)
- res |= ((u32)(*p++)) << (8 * i);
-
- return res;
-}
-
-static void secmicputuint32(u8 *p, u32 val)
-/* Convert from u32 to Byte[] in a portable way */
-{
- long i;
-
- for (i = 0; i < 4; i++) {
- *p++ = (u8)(val & 0xff);
- val >>= 8;
- }
-
-}
-
-static void secmicclear(struct mic_data *pmicdata)
-{
-/* Reset the state to the empty message. */
-
- pmicdata->L = pmicdata->K0;
- pmicdata->R = pmicdata->K1;
- pmicdata->nBytesInM = 0;
- pmicdata->M = 0;
-
-}
-
-void rtw_secmicsetkey(struct mic_data *pmicdata, u8 *key)
-{
- /* Set the key */
-
- pmicdata->K0 = secmicgetuint32(key);
- pmicdata->K1 = secmicgetuint32(key + 4);
- /* and reset the message */
- secmicclear(pmicdata);
-
-}
-
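-/*
- * The helper below is the per-byte step of the Michael MIC block function
- * used by TKIP: bytes are packed into a 32-bit word and, once four bytes
- * have accumulated, mixed into the (L, R) state with the XOR / rotate /
- * byte-swap rounds.
- */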
-void rtw_secmicappendbyte(struct mic_data *pmicdata, u8 b)
-{
-
- /* Append the byte to our word-sized buffer */
- pmicdata->M |= ((unsigned long)b) << (8 * pmicdata->nBytesInM);
- pmicdata->nBytesInM++;
- /* Process the word if it is full. */
- if (pmicdata->nBytesInM >= 4) {
- pmicdata->L ^= pmicdata->M;
- pmicdata->R ^= ROL32(pmicdata->L, 17);
- pmicdata->L += pmicdata->R;
- pmicdata->R ^= ((pmicdata->L & 0xff00ff00) >> 8) | ((pmicdata->L & 0x00ff00ff) << 8);
- pmicdata->L += pmicdata->R;
- pmicdata->R ^= ROL32(pmicdata->L, 3);
- pmicdata->L += pmicdata->R;
- pmicdata->R ^= ROR32(pmicdata->L, 2);
- pmicdata->L += pmicdata->R;
- /* Clear the buffer */
- pmicdata->M = 0;
- pmicdata->nBytesInM = 0;
- }
-
-}
-
-void rtw_secmicappend(struct mic_data *pmicdata, u8 *src, u32 nbytes)
-{
-
- /* This is simple */
- while (nbytes > 0) {
- rtw_secmicappendbyte(pmicdata, *src++);
- nbytes--;
- }
-
-}
-
-void rtw_secgetmic(struct mic_data *pmicdata, u8 *dst)
-{
-
- /* Append the minimum padding */
- rtw_secmicappendbyte(pmicdata, 0x5a);
- rtw_secmicappendbyte(pmicdata, 0);
- rtw_secmicappendbyte(pmicdata, 0);
- rtw_secmicappendbyte(pmicdata, 0);
- rtw_secmicappendbyte(pmicdata, 0);
- /* and then zeroes until the length is a multiple of 4 */
- while (pmicdata->nBytesInM != 0)
- rtw_secmicappendbyte(pmicdata, 0);
- /* The appendByte function has already computed the result. */
- secmicputuint32(dst, pmicdata->L);
- secmicputuint32(dst + 4, pmicdata->R);
- /* Reset to the empty message. */
- secmicclear(pmicdata);
-
-}
-
-void rtw_seccalctkipmic(u8 *key, u8 *header, u8 *data, u32 data_len, u8 *mic_code, u8 pri)
-{
- struct mic_data micdata;
- u8 priority[4] = {0x0, 0x0, 0x0, 0x0};
-
- rtw_secmicsetkey(&micdata, key);
- priority[0] = pri;
-
- /* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
- if (header[1] & 1) { /* ToDS == 1 */
- rtw_secmicappend(&micdata, &header[16], 6); /* DA */
- if (header[1] & 2) /* From Ds == 1 */
- rtw_secmicappend(&micdata, &header[24], 6);
- else
- rtw_secmicappend(&micdata, &header[10], 6);
- } else { /* ToDS == 0 */
- rtw_secmicappend(&micdata, &header[4], 6); /* DA */
- if (header[1] & 2) /* From Ds == 1 */
- rtw_secmicappend(&micdata, &header[16], 6);
- else
- rtw_secmicappend(&micdata, &header[10], 6);
- }
- rtw_secmicappend(&micdata, &priority[0], 4);
-
- rtw_secmicappend(&micdata, data, data_len);
-
- rtw_secgetmic(&micdata, mic_code);
-
-}
-
-/* macros for extraction/creation of unsigned char/unsigned short values */
-#define RotR1(v16) ((((v16) >> 1) & 0x7FFF) ^ (((v16) & 1) << 15))
-#define Lo8(v16) ((u8)((v16) & 0x00FF))
-#define Hi8(v16) ((u8)(((v16) >> 8) & 0x00FF))
-#define Lo16(v32) ((u16)((v32) & 0xFFFF))
-#define Hi16(v32) ((u16)(((v32) >> 16) & 0xFFFF))
-#define Mk16(hi, lo) ((lo) ^ (((u16)(hi)) << 8))
-
-/* select the Nth 16-bit word of the temporal key unsigned char array TK[] */
-#define TK16(N) Mk16(tk[2 * (N) + 1], tk[2 * (N)])
-
-/* S-box lookup: 16 bits --> 16 bits */
-#define _S_(v16) (Sbox1[0][Lo8(v16)] ^ Sbox1[1][Hi8(v16)])
-
-/* fixed algorithm "parameters" */
-#define PHASE1_LOOP_CNT 8 /* this needs to be "big enough" */
-#define TA_SIZE 6 /* 48-bit transmitter address */
-#define TK_SIZE 16 /* 128-bit temporal key */
-#define P1K_SIZE 10 /* 80-bit Phase1 key */
-#define RC4_KEY_SIZE 16 /* 128-bit RC4KEY (104 bits unknown) */
-
-/* 2-byte by 2-byte subset of the full AES S-box table */
-static const unsigned short Sbox1[2][256] = { /* Sbox for hash (can be in ROM) */
-{
- 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
- 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
- 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
- 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B,
- 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F,
- 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F,
- 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5,
- 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F,
- 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB,
- 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397,
- 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED,
- 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A,
- 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194,
- 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3,
- 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104,
- 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D,
- 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39,
- 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695,
- 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83,
- 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76,
- 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4,
- 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B,
- 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0,
- 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018,
- 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751,
- 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85,
- 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12,
- 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9,
- 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7,
- 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A,
- 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8,
- 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
- },
-
-	{ /* second half of table is the byte-reversed version of the first! */
- 0xA5C6, 0x84F8, 0x99EE, 0x8DF6, 0x0DFF, 0xBDD6, 0xB1DE, 0x5491,
- 0x5060, 0x0302, 0xA9CE, 0x7D56, 0x19E7, 0x62B5, 0xE64D, 0x9AEC,
- 0x458F, 0x9D1F, 0x4089, 0x87FA, 0x15EF, 0xEBB2, 0xC98E, 0x0BFB,
- 0xEC41, 0x67B3, 0xFD5F, 0xEA45, 0xBF23, 0xF753, 0x96E4, 0x5B9B,
- 0xC275, 0x1CE1, 0xAE3D, 0x6A4C, 0x5A6C, 0x417E, 0x02F5, 0x4F83,
- 0x5C68, 0xF451, 0x34D1, 0x08F9, 0x93E2, 0x73AB, 0x5362, 0x3F2A,
- 0x0C08, 0x5295, 0x6546, 0x5E9D, 0x2830, 0xA137, 0x0F0A, 0xB52F,
- 0x090E, 0x3624, 0x9B1B, 0x3DDF, 0x26CD, 0x694E, 0xCD7F, 0x9FEA,
- 0x1B12, 0x9E1D, 0x7458, 0x2E34, 0x2D36, 0xB2DC, 0xEEB4, 0xFB5B,
- 0xF6A4, 0x4D76, 0x61B7, 0xCE7D, 0x7B52, 0x3EDD, 0x715E, 0x9713,
- 0xF5A6, 0x68B9, 0x0000, 0x2CC1, 0x6040, 0x1FE3, 0xC879, 0xEDB6,
- 0xBED4, 0x468D, 0xD967, 0x4B72, 0xDE94, 0xD498, 0xE8B0, 0x4A85,
- 0x6BBB, 0x2AC5, 0xE54F, 0x16ED, 0xC586, 0xD79A, 0x5566, 0x9411,
- 0xCF8A, 0x10E9, 0x0604, 0x81FE, 0xF0A0, 0x4478, 0xBA25, 0xE34B,
- 0xF3A2, 0xFE5D, 0xC080, 0x8A05, 0xAD3F, 0xBC21, 0x4870, 0x04F1,
- 0xDF63, 0xC177, 0x75AF, 0x6342, 0x3020, 0x1AE5, 0x0EFD, 0x6DBF,
- 0x4C81, 0x1418, 0x3526, 0x2FC3, 0xE1BE, 0xA235, 0xCC88, 0x392E,
- 0x5793, 0xF255, 0x82FC, 0x477A, 0xACC8, 0xE7BA, 0x2B32, 0x95E6,
- 0xA0C0, 0x9819, 0xD19E, 0x7FA3, 0x6644, 0x7E54, 0xAB3B, 0x830B,
- 0xCA8C, 0x29C7, 0xD36B, 0x3C28, 0x79A7, 0xE2BC, 0x1D16, 0x76AD,
- 0x3BDB, 0x5664, 0x4E74, 0x1E14, 0xDB92, 0x0A0C, 0x6C48, 0xE4B8,
- 0x5D9F, 0x6EBD, 0xEF43, 0xA6C4, 0xA839, 0xA431, 0x37D3, 0x8BF2,
- 0x32D5, 0x438B, 0x596E, 0xB7DA, 0x8C01, 0x64B1, 0xD29C, 0xE049,
- 0xB4D8, 0xFAAC, 0x07F3, 0x25CF, 0xAFCA, 0x8EF4, 0xE947, 0x1810,
- 0xD56F, 0x88F0, 0x6F4A, 0x725C, 0x2438, 0xF157, 0xC773, 0x5197,
- 0x23CB, 0x7CA1, 0x9CE8, 0x213E, 0xDD96, 0xDC61, 0x860D, 0x850F,
- 0x90E0, 0x427C, 0xC471, 0xAACC, 0xD890, 0x0506, 0x01F7, 0x121C,
- 0xA3C2, 0x5F6A, 0xF9AE, 0xD069, 0x9117, 0x5899, 0x273A, 0xB927,
- 0x38D9, 0x13EB, 0xB32B, 0x3322, 0xBBD2, 0x70A9, 0x8907, 0xA733,
- 0xB62D, 0x223C, 0x9215, 0x20C9, 0x4987, 0xFFAA, 0x7850, 0x7AA5,
- 0x8F03, 0xF859, 0x8009, 0x171A, 0xDA65, 0x31D7, 0xC684, 0xB8D0,
- 0xC382, 0xB029, 0x775A, 0x111E, 0xCB7B, 0xFCA8, 0xD66D, 0x3A2C,
- }
-};
-
- /*
-**********************************************************************
-* Routine: Phase 1 -- generate P1K, given TA, TK, IV32
-*
-* Inputs:
-* tk[] = temporal key [128 bits]
-* ta[] = transmitter's MAC address [ 48 bits]
-* iv32 = upper 32 bits of IV [ 32 bits]
-* Output:
-* p1k[] = Phase 1 key [ 80 bits]
-*
-* Note:
-* This function only needs to be called every 2**16 packets,
-* although in theory it could be called every packet.
-*
-**********************************************************************
-*/
-static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
-{
- int i;
-
- /* Initialize the 80 bits of P1K[] from IV32 and TA[0..5] */
- p1k[0] = Lo16(iv32);
- p1k[1] = Hi16(iv32);
- p1k[2] = Mk16(ta[1], ta[0]); /* use TA[] as little-endian */
- p1k[3] = Mk16(ta[3], ta[2]);
- p1k[4] = Mk16(ta[5], ta[4]);
-
- /* Now compute an unbalanced Feistel cipher with 80-bit block */
- /* size on the 80-bit block P1K[], using the 128-bit key TK[] */
- for (i = 0; i < PHASE1_LOOP_CNT; i++) { /* Each add operation here is mod 2**16 */
- p1k[0] += _S_(p1k[4] ^ TK16((i & 1) + 0));
- p1k[1] += _S_(p1k[0] ^ TK16((i & 1) + 2));
- p1k[2] += _S_(p1k[1] ^ TK16((i & 1) + 4));
- p1k[3] += _S_(p1k[2] ^ TK16((i & 1) + 6));
- p1k[4] += _S_(p1k[3] ^ TK16((i & 1) + 0));
- p1k[4] += (unsigned short)i; /* avoid "slide attacks" */
- }
-
-}
-
-/*
-**********************************************************************
-* Routine: Phase 2 -- generate RC4KEY, given TK, P1K, IV16
-*
-* Inputs:
-* tk[] = Temporal key [128 bits]
-* p1k[] = Phase 1 output key [ 80 bits]
-* iv16 = low 16 bits of IV counter [ 16 bits]
-* Output:
-* rc4key[] = the key used to encrypt the packet [128 bits]
-*
-* Note:
-* The value {TA, IV32, IV16} for Phase1/Phase2 must be unique
-* across all packets using the same key TK value. Then, for a
-* given value of TK[], this TKIP48 construction guarantees that
-* the final RC4KEY value is unique across all packets.
-*
-* Suggested implementation optimization: if PPK[] is "overlaid"
-* appropriately on RC4KEY[], there is no need for the final
-* for loop below that copies the PPK[] result into RC4KEY[].
-*
-**********************************************************************
-*/
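-/*
- * As used by rtw_tkip_encrypt()/rtw_tkip_decrypt() below, the 48-bit TKIP
- * TSC is split into iv32 (upper 32 bits, fed to phase1 and cacheable for
- * 2**16 packets) and iv16 (lower 16 bits, fed to phase2) to derive the
- * per-packet RC4 key.
- */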
-static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16)
-{
- int i;
- u16 PPK[6]; /* temporary key for mixing */
-
- /* Note: all adds in the PPK[] equations below are mod 2**16 */
- for (i = 0; i < 5; i++)
- PPK[i] = p1k[i]; /* first, copy P1K to PPK */
- PPK[5] = p1k[4] + iv16; /* next, add in IV16 */
-
- /* Bijective non-linear mixing of the 96 bits of PPK[0..5] */
- PPK[0] += _S_(PPK[5] ^ TK16(0)); /* Mix key in each "round" */
- PPK[1] += _S_(PPK[0] ^ TK16(1));
- PPK[2] += _S_(PPK[1] ^ TK16(2));
- PPK[3] += _S_(PPK[2] ^ TK16(3));
- PPK[4] += _S_(PPK[3] ^ TK16(4));
- PPK[5] += _S_(PPK[4] ^ TK16(5)); /* Total # S-box lookups == 6 */
-
- /* Final sweep: bijective, "linear". Rotates kill LSB correlations */
- PPK[0] += RotR1(PPK[5] ^ TK16(6));
- PPK[1] += RotR1(PPK[0] ^ TK16(7)); /* Use all of TK[] in Phase2 */
- PPK[2] += RotR1(PPK[1]);
- PPK[3] += RotR1(PPK[2]);
- PPK[4] += RotR1(PPK[3]);
- PPK[5] += RotR1(PPK[4]);
- /* Note: At this point, for a given key TK[0..15], the 96-bit output */
- /* value PPK[0..5] is guaranteed to be unique, as a function */
- /* of the 96-bit "input" value {TA, IV32, IV16}. That is, P1K */
- /* is now a keyed permutation of {TA, IV32, IV16}. */
-
- /* Set RC4KEY[0..3], which includes "cleartext" portion of RC4 key */
- rc4key[0] = Hi8(iv16); /* RC4KEY[0..2] is the WEP IV */
- rc4key[1] = (Hi8(iv16) | 0x20) & 0x7F; /* Help avoid weak (FMS) keys */
- rc4key[2] = Lo8(iv16);
- rc4key[3] = Lo8((PPK[5] ^ TK16(0)) >> 1);
-
- /* Copy 96 bits of PPK[0..5] to RC4KEY[4..15] (little-endian) */
- for (i = 0; i < 6; i++) {
- rc4key[4 + 2 * i] = Lo8(PPK[i]);
- rc4key[5 + 2 * i] = Hi8(PPK[i]);
- }
-
-}
-
-/* The hlen doesn't include the IV */
-u32 rtw_tkip_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
-{ /* exclude ICV */
- u16 pnl;
- u32 pnh;
- u8 rc4key[16];
- u8 ttkey[16];
- union {
- __le32 f0;
- u8 f1[4];
- } crc;
- u8 hw_hdr_offset = 0;
- int curfragnum, length;
-
- u8 *pframe, *payload, *iv, *prwskey;
- union pn48 dot11txpn;
- struct sta_info *stainfo;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct arc4_ctx *ctx = &psecuritypriv->xmit_arc4_ctx;
- u32 res = _SUCCESS;
-
- if (!pxmitframe->buf_addr)
- return _FAIL;
-
- hw_hdr_offset = TXDESC_SIZE + pxmitframe->pkt_offset * PACKET_OFFSET_SZ;
- pframe = pxmitframe->buf_addr + hw_hdr_offset;
-
- /* 4 start to encrypt each fragment */
- if (pattrib->encrypt == _TKIP_) {
- if (pattrib->psta)
- stainfo = pattrib->psta;
- else
- stainfo = rtw_get_stainfo(&padapter->stapriv, &pattrib->ra[0]);
-
- if (stainfo) {
- if (is_multicast_ether_addr(pattrib->ra))
- prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
- else
- prwskey = &stainfo->dot118021x_UncstKey.skey[0];
-
- for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
- iv = pframe + pattrib->hdrlen;
- payload = pframe + pattrib->iv_len + pattrib->hdrlen;
-
- GET_TKIP_PN(iv, dot11txpn);
-
- pnl = (u16)(dot11txpn.val);
- pnh = (u32)(dot11txpn.val >> 16);
- phase1((u16 *)&ttkey[0], prwskey, &pattrib->ta[0], pnh);
- phase2(&rc4key[0], prwskey, (u16 *)&ttkey[0], pnl);
-
- if ((curfragnum + 1) == pattrib->nr_frags) { /* 4 the last fragment */
- length = pattrib->last_txcmdsz - pattrib->hdrlen - pattrib->iv_len - pattrib->icv_len;
- crc.f0 = cpu_to_le32(~crc32_le(~0, payload, length));
-
- arc4_setkey(ctx, rc4key, 16);
- arc4_crypt(ctx, payload, payload, length);
- arc4_crypt(ctx, payload + length, crc.f1, 4);
- } else {
- length = pxmitpriv->frag_len - pattrib->hdrlen - pattrib->iv_len - pattrib->icv_len;
- crc.f0 = cpu_to_le32(~crc32_le(~0, payload, length));
-
- arc4_setkey(ctx, rc4key, 16);
- arc4_crypt(ctx, payload, payload, length);
- arc4_crypt(ctx, payload + length, crc.f1, 4);
-
- pframe += pxmitpriv->frag_len;
- pframe = PTR_ALIGN(pframe, 4);
- }
- }
- } else {
- res = _FAIL;
- }
- }
-
- return res;
-}
-
-/* The hlen doesn't include the IV */
-u32 rtw_tkip_decrypt(struct adapter *padapter, struct recv_frame *precvframe)
-{ /* exclude ICV */
- u16 pnl;
- u32 pnh;
- u8 rc4key[16];
- u8 ttkey[16];
- union {
- __le32 f0;
- u8 f1[4];
- } crc;
- int length;
-
- u8 *pframe, *payload, *iv, *prwskey;
- union pn48 dot11txpn;
- struct sta_info *stainfo;
- struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct arc4_ctx *ctx = &psecuritypriv->recv_arc4_ctx;
- u32 res = _SUCCESS;
-
- pframe = precvframe->rx_data;
-
- /* 4 start to decrypt recvframe */
- if (prxattrib->encrypt == _TKIP_) {
- stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
- if (stainfo) {
- if (is_multicast_ether_addr(prxattrib->ra)) {
- if (!psecuritypriv->binstallGrpkey) {
- res = _FAIL;
- goto exit;
- }
- prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
- } else {
- prwskey = &stainfo->dot118021x_UncstKey.skey[0];
- }
-
- iv = pframe + prxattrib->hdrlen;
- payload = pframe + prxattrib->iv_len + prxattrib->hdrlen;
- length = precvframe->len - prxattrib->hdrlen - prxattrib->iv_len;
-
- GET_TKIP_PN(iv, dot11txpn);
-
- pnl = (u16)(dot11txpn.val);
- pnh = (u32)(dot11txpn.val >> 16);
-
- phase1((u16 *)&ttkey[0], prwskey, &prxattrib->ta[0], pnh);
- phase2(&rc4key[0], prwskey, (unsigned short *)&ttkey[0], pnl);
-
- /* 4 decrypt payload include icv */
-
- arc4_setkey(ctx, rc4key, 16);
- arc4_crypt(ctx, payload, payload, length);
-
- crc.f0 = cpu_to_le32(~crc32_le(~0, payload, length));
-
- if (crc.f1[3] != payload[length - 1] ||
- crc.f1[2] != payload[length - 2] ||
- crc.f1[1] != payload[length - 3] ||
- crc.f1[0] != payload[length - 4])
- res = _FAIL;
- } else {
- res = _FAIL;
- }
- }
-
-exit:
- return res;
-}
-
-/* 3 ===== AES related ===== */
-
-#define MAX_MSG_SIZE 2048
-/*****************************/
-/******** SBOX Table *********/
-/*****************************/
-
-static u8 sbox_table[256] = {
- 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
- 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
- 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
- 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
- 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
- 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
- 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
- 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
- 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
- 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
- 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
- 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
- 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
- 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
- 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
- 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
- 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
- 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
- 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
- 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
- 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
- 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
- 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
- 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
- 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
- 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
- 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
- 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
- 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
- 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
- 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
- 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
-};
-
-/*****************************/
-/**** Function Prototypes ****/
-/*****************************/
-
-static void bitwise_xor(u8 *ina, u8 *inb, u8 *out);
-static void construct_mic_iv(u8 *mic_header1, int qc_exists, int a4_exists, u8 *mpdu, uint payload_length, u8 *pn_vector);
-static void construct_mic_header1(u8 *mic_header1, int header_length, u8 *mpdu);
-static void construct_mic_header2(u8 *mic_header2, u8 *mpdu, int a4_exists, int qc_exists);
-static void construct_ctr_preload(u8 *ctr_preload, int a4_exists, int qc_exists, u8 *mpdu, u8 *pn_vector, int c);
-static void xor_128(u8 *a, u8 *b, u8 *out);
-static void xor_32(u8 *a, u8 *b, u8 *out);
-static u8 sbox(u8 a);
-static void next_key(u8 *key, int round);
-static void byte_sub(u8 *in, u8 *out);
-static void shift_row(u8 *in, u8 *out);
-static void mix_column(u8 *in, u8 *out);
-static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext);
-
-/****************************************/
-/* aes128k128d() */
-/* Performs a 128 bit AES encrypt with */
-/* 128 bit data. */
-/****************************************/
-static void xor_128(u8 *a, u8 *b, u8 *out)
-{
- int i;
-
- for (i = 0; i < 16; i++)
- out[i] = a[i] ^ b[i];
-
-}
-
-static void xor_32(u8 *a, u8 *b, u8 *out)
-{
- int i;
-
- for (i = 0; i < 4; i++)
- out[i] = a[i] ^ b[i];
-
-}
-
-static u8 sbox(u8 a)
-{
- return sbox_table[(int)a];
-}
-
-static void next_key(u8 *key, int round)
-{
- u8 rcon;
- u8 sbox_key[4];
- u8 rcon_table[12] = {
- 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
- 0x1b, 0x36, 0x36, 0x36
- };
-
- sbox_key[0] = sbox(key[13]);
- sbox_key[1] = sbox(key[14]);
- sbox_key[2] = sbox(key[15]);
- sbox_key[3] = sbox(key[12]);
-
- rcon = rcon_table[round];
-
- xor_32(&key[0], sbox_key, &key[0]);
- key[0] = key[0] ^ rcon;
-
- xor_32(&key[4], &key[0], &key[4]);
- xor_32(&key[8], &key[4], &key[8]);
- xor_32(&key[12], &key[8], &key[12]);
-
-}
-
-static void byte_sub(u8 *in, u8 *out)
-{
- int i;
-
- for (i = 0; i < 16; i++)
- out[i] = sbox(in[i]);
-
-}
-
-static void shift_row(u8 *in, u8 *out)
-{
-
- out[0] = in[0];
- out[1] = in[5];
- out[2] = in[10];
- out[3] = in[15];
- out[4] = in[4];
- out[5] = in[9];
- out[6] = in[14];
- out[7] = in[3];
- out[8] = in[8];
- out[9] = in[13];
- out[10] = in[2];
- out[11] = in[7];
- out[12] = in[12];
- out[13] = in[1];
- out[14] = in[6];
- out[15] = in[11];
-
-}
-
-static void mix_column(u8 *in, u8 *out)
-{
- int i;
- u8 add1b[4];
- u8 add1bf7[4];
- u8 rotl[4];
- u8 swap_halfs[4];
- u8 andf7[4];
- u8 rotr[4];
- u8 temp[4];
- u8 tempb[4];
-
- for (i = 0 ; i < 4; i++) {
- if ((in[i] & 0x80) == 0x80)
- add1b[i] = 0x1b;
- else
- add1b[i] = 0x00;
- }
-
- swap_halfs[0] = in[2]; /* Swap halves */
- swap_halfs[1] = in[3];
- swap_halfs[2] = in[0];
- swap_halfs[3] = in[1];
-
- rotl[0] = in[3]; /* Rotate left 8 bits */
- rotl[1] = in[0];
- rotl[2] = in[1];
- rotl[3] = in[2];
-
- andf7[0] = in[0] & 0x7f;
- andf7[1] = in[1] & 0x7f;
- andf7[2] = in[2] & 0x7f;
- andf7[3] = in[3] & 0x7f;
-
- for (i = 3; i > 0; i--) { /* logical shift left 1 bit */
- andf7[i] = andf7[i] << 1;
- if ((andf7[i - 1] & 0x80) == 0x80)
- andf7[i] = (andf7[i] | 0x01);
- }
- andf7[0] = andf7[0] << 1;
- andf7[0] = andf7[0] & 0xfe;
-
- xor_32(add1b, andf7, add1bf7);
-
- xor_32(in, add1bf7, rotr);
-
- temp[0] = rotr[0]; /* Rotate right 8 bits */
- rotr[0] = rotr[1];
- rotr[1] = rotr[2];
- rotr[2] = rotr[3];
- rotr[3] = temp[0];
-
- xor_32(add1bf7, rotr, temp);
- xor_32(swap_halfs, rotl, tempb);
- xor_32(temp, tempb, out);
-
-}
-
-static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext)
-{
- int round;
- int i;
- u8 intermediatea[16];
- u8 intermediateb[16];
- u8 round_key[16];
-
- for (i = 0; i < 16; i++)
- round_key[i] = key[i];
- for (round = 0; round < 11; round++) {
- if (round == 0) {
- xor_128(round_key, data, ciphertext);
- next_key(round_key, round);
- } else if (round == 10) {
- byte_sub(ciphertext, intermediatea);
- shift_row(intermediatea, intermediateb);
- xor_128(intermediateb, round_key, ciphertext);
- } else { /* 1 - 9 */
- byte_sub(ciphertext, intermediatea);
- shift_row(intermediatea, intermediateb);
- mix_column(&intermediateb[0], &intermediatea[0]);
- mix_column(&intermediateb[4], &intermediatea[4]);
- mix_column(&intermediateb[8], &intermediatea[8]);
- mix_column(&intermediateb[12], &intermediatea[12]);
- xor_128(intermediatea, round_key, ciphertext);
- next_key(round_key, round);
- }
- }
-
-}
-
-/************************************************/
-/* construct_mic_iv() */
-/* Builds the MIC IV from header fields and PN */
-/************************************************/
-static void construct_mic_iv(u8 *mic_iv, int qc_exists, int a4_exists, u8 *mpdu,
- uint payload_length, u8 *pn_vector)
-{
- int i;
-
- mic_iv[0] = 0x59;
- if (qc_exists && a4_exists)
- mic_iv[1] = mpdu[30] & 0x0f; /* QoS_TC */
- if (qc_exists && !a4_exists)
- mic_iv[1] = mpdu[24] & 0x0f; /* mute bits 7-4 */
- if (!qc_exists)
- mic_iv[1] = 0x00;
- for (i = 2; i < 8; i++)
- mic_iv[i] = mpdu[i + 8]; /* mic_iv[2:7] = A2[0:5] = mpdu[10:15] */
- for (i = 8; i < 14; i++)
- mic_iv[i] = pn_vector[13 - i]; /* mic_iv[8:13] = PN[5:0] */
- mic_iv[14] = (unsigned char)(payload_length / 256);
- mic_iv[15] = (unsigned char)(payload_length % 256);
-
-}
-
-/************************************************/
-/* construct_mic_header1() */
-/* Builds the first MIC header block from */
-/* header fields. */
-/************************************************/
-static void construct_mic_header1(u8 *mic_header1, int header_length, u8 *mpdu)
-{
-
- mic_header1[0] = (u8)((header_length - 2) / 256);
- mic_header1[1] = (u8)((header_length - 2) % 256);
- mic_header1[2] = mpdu[0] & 0xcf; /* Mute CF poll & CF ack bits */
- mic_header1[3] = mpdu[1] & 0xc7; /* Mute retry, more data and pwr mgt bits */
- mic_header1[4] = mpdu[4]; /* A1 */
- mic_header1[5] = mpdu[5];
- mic_header1[6] = mpdu[6];
- mic_header1[7] = mpdu[7];
- mic_header1[8] = mpdu[8];
- mic_header1[9] = mpdu[9];
- mic_header1[10] = mpdu[10]; /* A2 */
- mic_header1[11] = mpdu[11];
- mic_header1[12] = mpdu[12];
- mic_header1[13] = mpdu[13];
- mic_header1[14] = mpdu[14];
- mic_header1[15] = mpdu[15];
-
-}
-
-/************************************************/
-/* construct_mic_header2() */
-/* Builds the last MIC header block from */
-/* header fields. */
-/************************************************/
-static void construct_mic_header2(u8 *mic_header2, u8 *mpdu, int a4_exists, int qc_exists)
-{
- int i;
-
- for (i = 0; i < 16; i++)
- mic_header2[i] = 0x00;
-
- mic_header2[0] = mpdu[16]; /* A3 */
- mic_header2[1] = mpdu[17];
- mic_header2[2] = mpdu[18];
- mic_header2[3] = mpdu[19];
- mic_header2[4] = mpdu[20];
- mic_header2[5] = mpdu[21];
-
- mic_header2[6] = 0x00;
- mic_header2[7] = 0x00; /* mpdu[23]; */
-
- if (!qc_exists && a4_exists) {
- for (i = 0; i < 6; i++)
- mic_header2[8 + i] = mpdu[24 + i]; /* A4 */
- }
-
- if (qc_exists && !a4_exists) {
- mic_header2[8] = mpdu[24] & 0x0f; /* mute bits 15 - 4 */
- mic_header2[9] = mpdu[25] & 0x00;
- }
-
- if (qc_exists && a4_exists) {
- for (i = 0; i < 6; i++)
- mic_header2[8 + i] = mpdu[24 + i]; /* A4 */
-
- mic_header2[14] = mpdu[30] & 0x0f;
- mic_header2[15] = mpdu[31] & 0x00;
- }
-
-}
-
-/************************************************/
-/* construct_ctr_preload() */
-/* Builds the CTR-mode preload block from */
-/* header fields, the PN and the block counter. */
-/************************************************/
-static void construct_ctr_preload(u8 *ctr_preload, int a4_exists, int qc_exists, u8 *mpdu, u8 *pn_vector, int c)
-{
- int i;
-
- for (i = 0; i < 16; i++)
- ctr_preload[i] = 0x00;
- i = 0;
-
- ctr_preload[0] = 0x01; /* flag */
- if (qc_exists && a4_exists)
-	ctr_preload[1] = mpdu[30] & 0x0f; /* QoS Control */
- if (qc_exists && !a4_exists)
- ctr_preload[1] = mpdu[24] & 0x0f;
-
- for (i = 2; i < 8; i++)
- ctr_preload[i] = mpdu[i + 8]; /* ctr_preload[2:7] = A2[0:5] = mpdu[10:15] */
- for (i = 8; i < 14; i++)
- ctr_preload[i] = pn_vector[13 - i]; /* ctr_preload[8:13] = PN[5:0] */
- ctr_preload[14] = (unsigned char)(c / 256); /* Ctr */
- ctr_preload[15] = (unsigned char)(c % 256);
-
-}
-
-/************************************/
-/* bitwise_xor() */
-/* A 128 bit, bitwise exclusive or */
-/************************************/
-static void bitwise_xor(u8 *ina, u8 *inb, u8 *out)
-{
- int i;
-
- for (i = 0; i < 16; i++)
- out[i] = ina[i] ^ inb[i];
-
-}
-
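-/************************************************/
-/* aes_cipher() */
-/* CCMP encryption of one MPDU: computes the */
-/* CBC-MAC (MIC) over the header blocks and the */
-/* payload, then encrypts payload and MIC in */
-/* AES counter mode. */
-/************************************************/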
-static void aes_cipher(u8 *key, uint hdrlen, u8 *pframe, uint plen)
-{
- uint qc_exists, a4_exists, i, j, payload_remainder,
- num_blocks, payload_index;
-
- u8 pn_vector[6];
- u8 mic_iv[16];
- u8 mic_header1[16];
- u8 mic_header2[16];
- u8 ctr_preload[16];
-
- /* Intermediate Buffers */
- u8 chain_buffer[16];
- u8 aes_out[16];
- u8 padded_buffer[16];
- u8 mic[8];
- uint frtype = GetFrameType(pframe);
- uint frsubtype = GetFrameSubType(pframe);
-
- frsubtype = frsubtype >> 4;
-
- memset((void *)mic_iv, 0, 16);
- memset((void *)mic_header1, 0, 16);
- memset((void *)mic_header2, 0, 16);
- memset((void *)ctr_preload, 0, 16);
- memset((void *)chain_buffer, 0, 16);
- memset((void *)aes_out, 0, 16);
- memset((void *)padded_buffer, 0, 16);
-
- if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen == WLAN_HDR_A3_QOS_LEN))
- a4_exists = 0;
- else
- a4_exists = 1;
-
- if ((frtype == WIFI_DATA_CFACK) || (frtype == WIFI_DATA_CFPOLL) || (frtype == WIFI_DATA_CFACKPOLL)) {
- qc_exists = 1;
- if (hdrlen != WLAN_HDR_A3_QOS_LEN)
- hdrlen += 2;
- } else if ((frsubtype == 0x08) || (frsubtype == 0x09) || (frsubtype == 0x0a) || (frsubtype == 0x0b)) {
- if (hdrlen != WLAN_HDR_A3_QOS_LEN)
- hdrlen += 2;
- qc_exists = 1;
- } else {
- qc_exists = 0;
- }
-
- pn_vector[0] = pframe[hdrlen];
- pn_vector[1] = pframe[hdrlen + 1];
- pn_vector[2] = pframe[hdrlen + 4];
- pn_vector[3] = pframe[hdrlen + 5];
- pn_vector[4] = pframe[hdrlen + 6];
- pn_vector[5] = pframe[hdrlen + 7];
-
- construct_mic_iv(mic_iv, qc_exists, a4_exists, pframe, plen, pn_vector);
-
- construct_mic_header1(mic_header1, hdrlen, pframe);
- construct_mic_header2(mic_header2, pframe, a4_exists, qc_exists);
-
- payload_remainder = plen % 16;
- num_blocks = plen / 16;
-
- /* Find start of payload */
- payload_index = (hdrlen + 8);
-
- /* Calculate MIC */
- aes128k128d(key, mic_iv, aes_out);
- bitwise_xor(aes_out, mic_header1, chain_buffer);
- aes128k128d(key, chain_buffer, aes_out);
- bitwise_xor(aes_out, mic_header2, chain_buffer);
- aes128k128d(key, chain_buffer, aes_out);
-
- for (i = 0; i < num_blocks; i++) {
- bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);/* bitwise_xor(aes_out, &message[payload_index], chain_buffer); */
-
- payload_index += 16;
- aes128k128d(key, chain_buffer, aes_out);
- }
-
- /* Add on the final payload block if it needs padding */
- if (payload_remainder > 0) {
- for (j = 0; j < 16; j++)
- padded_buffer[j] = 0x00;
- for (j = 0; j < payload_remainder; j++)
- padded_buffer[j] = pframe[payload_index++];/* padded_buffer[j] = message[payload_index++]; */
- bitwise_xor(aes_out, padded_buffer, chain_buffer);
- aes128k128d(key, chain_buffer, aes_out);
- }
-
- for (j = 0; j < 8; j++)
- mic[j] = aes_out[j];
-
- /* Insert MIC into payload */
- for (j = 0; j < 8; j++)
- pframe[payload_index + j] = mic[j]; /* message[payload_index+j] = mic[j]; */
-
- payload_index = hdrlen + 8;
- for (i = 0; i < num_blocks; i++) {
- construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, pn_vector, i + 1);
- aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
- for (j = 0; j < 16; j++)
- pframe[payload_index++] = chain_buffer[j];
- }
-
- if (payload_remainder > 0) { /* If there is a short final block, then pad it,*/
- /* encrypt it and copy the unpadded part back */
- construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, pn_vector, num_blocks + 1);
-
- for (j = 0; j < 16; j++)
- padded_buffer[j] = 0x00;
- for (j = 0; j < payload_remainder; j++)
- padded_buffer[j] = pframe[payload_index + j];
- aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, padded_buffer, chain_buffer);
- for (j = 0; j < payload_remainder; j++)
- pframe[payload_index++] = chain_buffer[j];
- }
- /* Encrypt the MIC */
- construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, pn_vector, 0);
-
- for (j = 0; j < 16; j++)
- padded_buffer[j] = 0x00;
- for (j = 0; j < 8; j++)
- padded_buffer[j] = pframe[j + hdrlen + 8 + plen];
-
- aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, padded_buffer, chain_buffer);
- for (j = 0; j < 8; j++)
- pframe[payload_index++] = chain_buffer[j];
-}
-
-u32 rtw_aes_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
-{ /* exclude ICV */
-
- /*static*/
-/* unsigned char message[MAX_MSG_SIZE]; */
-
- /* Intermediate Buffers */
- int curfragnum, length;
- u8 *pframe, *prwskey; /* *payload,*iv */
- u8 hw_hdr_offset = 0;
- struct sta_info *stainfo;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
-/* uint offset = 0; */
- u32 res = _SUCCESS;
-
- if (!pxmitframe->buf_addr)
- return _FAIL;
-
- hw_hdr_offset = TXDESC_SIZE + pxmitframe->pkt_offset * PACKET_OFFSET_SZ;
- pframe = pxmitframe->buf_addr + hw_hdr_offset;
-
-	/* start to encrypt each fragment */
- if (pattrib->encrypt == _AES_) {
- if (pattrib->psta)
- stainfo = pattrib->psta;
- else
- stainfo = rtw_get_stainfo(&padapter->stapriv, &pattrib->ra[0]);
-
- if (stainfo) {
- if (is_multicast_ether_addr(pattrib->ra))
- prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
- else
- prwskey = &stainfo->dot118021x_UncstKey.skey[0];
- for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
-				if ((curfragnum + 1) == pattrib->nr_frags) { /* the last fragment */
- length = pattrib->last_txcmdsz - pattrib->hdrlen - pattrib->iv_len - pattrib->icv_len;
-
- aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
- } else {
- length = pxmitpriv->frag_len - pattrib->hdrlen - pattrib->iv_len - pattrib->icv_len;
-
- aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
- pframe += pxmitpriv->frag_len;
- pframe = PTR_ALIGN(pframe, 4);
- }
- }
- } else {
- res = _FAIL;
- }
- }
-
- return res;
-}
-
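-/************************************************/
-/* aes_decipher() */
-/* CCMP decryption of one MPDU: decrypts the */
-/* payload and MIC in AES counter mode, then */
-/* recomputes the MIC and compares it with the */
-/* received value. */
-/************************************************/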
-static int aes_decipher(u8 *key, uint hdrlen,
- u8 *pframe, uint plen)
-{
- static u8 message[MAX_MSG_SIZE];
- uint qc_exists, a4_exists, i, j, payload_remainder,
- num_blocks, payload_index;
- int res = _SUCCESS;
- u8 pn_vector[6];
- u8 mic_iv[16];
- u8 mic_header1[16];
- u8 mic_header2[16];
- u8 ctr_preload[16];
-
- /* Intermediate Buffers */
- u8 chain_buffer[16];
- u8 aes_out[16];
- u8 padded_buffer[16];
- u8 mic[8];
-
-/* uint offset = 0; */
- uint frtype = GetFrameType(pframe);
- uint frsubtype = GetFrameSubType(pframe);
-
- frsubtype = frsubtype >> 4;
-
- memset((void *)mic_iv, 0, 16);
- memset((void *)mic_header1, 0, 16);
- memset((void *)mic_header2, 0, 16);
- memset((void *)ctr_preload, 0, 16);
- memset((void *)chain_buffer, 0, 16);
- memset((void *)aes_out, 0, 16);
- memset((void *)padded_buffer, 0, 16);
-
- /* start to decrypt the payload */
-
-	num_blocks = (plen - 8) / 16; /* plen includes the LLC, payload and MIC */
-
- payload_remainder = (plen - 8) % 16;
-
- pn_vector[0] = pframe[hdrlen];
- pn_vector[1] = pframe[hdrlen + 1];
- pn_vector[2] = pframe[hdrlen + 4];
- pn_vector[3] = pframe[hdrlen + 5];
- pn_vector[4] = pframe[hdrlen + 6];
- pn_vector[5] = pframe[hdrlen + 7];
-
- if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen == WLAN_HDR_A3_QOS_LEN))
- a4_exists = 0;
- else
- a4_exists = 1;
-
- if ((frtype == WIFI_DATA_CFACK) || (frtype == WIFI_DATA_CFPOLL) ||
- (frtype == WIFI_DATA_CFACKPOLL)) {
- qc_exists = 1;
- if (hdrlen != WLAN_HDR_A3_QOS_LEN)
- hdrlen += 2;
- } else if ((frsubtype == 0x08) || (frsubtype == 0x09) ||
- (frsubtype == 0x0a) || (frsubtype == 0x0b)) {
- if (hdrlen != WLAN_HDR_A3_QOS_LEN)
- hdrlen += 2;
- qc_exists = 1;
- } else {
- qc_exists = 0;
- }
-
- /* now, decrypt pframe with hdrlen offset and plen long */
-
- payload_index = hdrlen + 8; /* 8 is for extiv */
-
- for (i = 0; i < num_blocks; i++) {
- construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, pn_vector, i + 1);
-
- aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
-
- for (j = 0; j < 16; j++)
- pframe[payload_index++] = chain_buffer[j];
- }
-
- if (payload_remainder > 0) { /* If there is a short final block, then pad it,*/
- /* encrypt it and copy the unpadded part back */
- construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, pn_vector, num_blocks + 1);
-
- for (j = 0; j < 16; j++)
- padded_buffer[j] = 0x00;
- for (j = 0; j < payload_remainder; j++)
- padded_buffer[j] = pframe[payload_index + j];
- aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, padded_buffer, chain_buffer);
- for (j = 0; j < payload_remainder; j++)
- pframe[payload_index++] = chain_buffer[j];
- }
-
- /* start to calculate the mic */
- if ((hdrlen + plen + 8) <= MAX_MSG_SIZE)
- memcpy(message, pframe, (hdrlen + plen + 8)); /* 8 is for ext iv len */
-
- pn_vector[0] = pframe[hdrlen];
- pn_vector[1] = pframe[hdrlen + 1];
- pn_vector[2] = pframe[hdrlen + 4];
- pn_vector[3] = pframe[hdrlen + 5];
- pn_vector[4] = pframe[hdrlen + 6];
- pn_vector[5] = pframe[hdrlen + 7];
- construct_mic_iv(mic_iv, qc_exists, a4_exists, message, plen - 8, pn_vector);
-
- construct_mic_header1(mic_header1, hdrlen, message);
- construct_mic_header2(mic_header2, message, a4_exists, qc_exists);
-
- payload_remainder = (plen - 8) % 16;
- num_blocks = (plen - 8) / 16;
-
- /* Find start of payload */
- payload_index = (hdrlen + 8);
-
- /* Calculate MIC */
- aes128k128d(key, mic_iv, aes_out);
- bitwise_xor(aes_out, mic_header1, chain_buffer);
- aes128k128d(key, chain_buffer, aes_out);
- bitwise_xor(aes_out, mic_header2, chain_buffer);
- aes128k128d(key, chain_buffer, aes_out);
-
- for (i = 0; i < num_blocks; i++) {
- bitwise_xor(aes_out, &message[payload_index], chain_buffer);
-
- payload_index += 16;
- aes128k128d(key, chain_buffer, aes_out);
- }
-
- /* Add on the final payload block if it needs padding */
- if (payload_remainder > 0) {
- for (j = 0; j < 16; j++)
- padded_buffer[j] = 0x00;
- for (j = 0; j < payload_remainder; j++)
- padded_buffer[j] = message[payload_index++];
- bitwise_xor(aes_out, padded_buffer, chain_buffer);
- aes128k128d(key, chain_buffer, aes_out);
- }
-
- for (j = 0 ; j < 8; j++)
- mic[j] = aes_out[j];
-
- /* Insert MIC into payload */
- for (j = 0; j < 8; j++)
- message[payload_index + j] = mic[j];
-
- payload_index = hdrlen + 8;
- for (i = 0; i < num_blocks; i++) {
- construct_ctr_preload(ctr_preload, a4_exists, qc_exists, message, pn_vector, i + 1);
- aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, &message[payload_index], chain_buffer);
- for (j = 0; j < 16; j++)
- message[payload_index++] = chain_buffer[j];
- }
-
- if (payload_remainder > 0) { /* If there is a short final block, then pad it,*/
- /* encrypt it and copy the unpadded part back */
- construct_ctr_preload(ctr_preload, a4_exists, qc_exists, message, pn_vector, num_blocks + 1);
-
- for (j = 0; j < 16; j++)
- padded_buffer[j] = 0x00;
- for (j = 0; j < payload_remainder; j++)
- padded_buffer[j] = message[payload_index + j];
- aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, padded_buffer, chain_buffer);
- for (j = 0; j < payload_remainder; j++)
- message[payload_index++] = chain_buffer[j];
- }
-
- /* Encrypt the MIC */
- construct_ctr_preload(ctr_preload, a4_exists, qc_exists, message, pn_vector, 0);
-
- for (j = 0; j < 16; j++)
- padded_buffer[j] = 0x00;
- for (j = 0; j < 8; j++)
- padded_buffer[j] = message[j + hdrlen + 8 + plen - 8];
-
- aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, padded_buffer, chain_buffer);
- for (j = 0; j < 8; j++)
- message[payload_index++] = chain_buffer[j];
-
- /* compare the mic */
- for (i = 0; i < 8; i++) {
- if (pframe[hdrlen + 8 + plen - 8 + i] != message[hdrlen + 8 + plen - 8 + i])
- res = _FAIL;
- }
-
- return res;
-}
-
-u32 rtw_aes_decrypt(struct adapter *padapter, struct recv_frame *precvframe)
-{ /* exclude ICV */
- /* Intermediate Buffers */
- int length;
- u8 *pframe, *prwskey; /* *payload,*iv */
- struct sta_info *stainfo;
- struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- u32 res = _SUCCESS;
-
- pframe = precvframe->rx_data;
-
-	/* start to decrypt the received frame */
- if (prxattrib->encrypt == _AES_) {
- stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
- if (stainfo) {
- if (is_multicast_ether_addr(prxattrib->ra)) {
-				/* in concurrent mode we should use SW decryption for the group key, so this message was removed */
- if (!psecuritypriv->binstallGrpkey) {
- res = _FAIL;
- goto exit;
- }
- prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
- if (psecuritypriv->dot118021XGrpKeyid != prxattrib->key_index) {
- res = _FAIL;
- goto exit;
- }
- } else {
- prwskey = &stainfo->dot118021x_UncstKey.skey[0];
- }
- length = precvframe->len - prxattrib->hdrlen - prxattrib->iv_len;
- res = aes_decipher(prwskey, prxattrib->hdrlen, pframe, length);
- } else {
- res = _FAIL;
- }
- }
-
-exit:
- return res;
-}
diff --git a/drivers/staging/r8188eu/core/rtw_sta_mgt.c b/drivers/staging/r8188eu/core/rtw_sta_mgt.c
deleted file mode 100644
index e1ae1859686e..000000000000
--- a/drivers/staging/r8188eu/core/rtw_sta_mgt.c
+++ /dev/null
@@ -1,490 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _RTW_STA_MGT_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/sta_info.h"
-
-static void _rtw_init_stainfo(struct sta_info *psta)
-{
-
- memset((u8 *)psta, 0, sizeof(struct sta_info));
-
- spin_lock_init(&psta->lock);
- INIT_LIST_HEAD(&psta->list);
- INIT_LIST_HEAD(&psta->hash_list);
- rtw_init_queue(&psta->sleep_q);
- psta->sleepq_len = 0;
-
- _rtw_init_sta_xmit_priv(&psta->sta_xmitpriv);
- _rtw_init_sta_recv_priv(&psta->sta_recvpriv);
-
- INIT_LIST_HEAD(&psta->asoc_list);
-
- INIT_LIST_HEAD(&psta->auth_list);
-
- psta->expire_to = 0;
-
- psta->flags = 0;
-
- psta->capability = 0;
-
- psta->bpairwise_key_installed = false;
-
- psta->nonerp_set = 0;
- psta->no_short_slot_time_set = 0;
- psta->no_short_preamble_set = 0;
- psta->no_ht_gf_set = 0;
- psta->no_ht_set = 0;
- psta->ht_20mhz_set = 0;
-
- psta->under_exist_checking = 0;
-
- psta->keep_alive_trycnt = 0;
-}
-
-int _rtw_init_sta_priv(struct sta_priv *pstapriv)
-{
- struct sta_info *psta;
- s32 i;
-
- pstapriv->pallocated_stainfo_buf = vzalloc(sizeof(struct sta_info) * NUM_STA + 4);
-
- if (!pstapriv->pallocated_stainfo_buf)
- return -ENOMEM;
-
- pstapriv->pstainfo_buf = pstapriv->pallocated_stainfo_buf + 4 -
- ((size_t)(pstapriv->pallocated_stainfo_buf) & 3);
-
- rtw_init_queue(&pstapriv->free_sta_queue);
-
- spin_lock_init(&pstapriv->sta_hash_lock);
-
- pstapriv->asoc_sta_count = 0;
- rtw_init_queue(&pstapriv->sleep_q);
- rtw_init_queue(&pstapriv->wakeup_q);
-
- psta = (struct sta_info *)(pstapriv->pstainfo_buf);
-
- for (i = 0; i < NUM_STA; i++) {
- _rtw_init_stainfo(psta);
-
- INIT_LIST_HEAD(&pstapriv->sta_hash[i]);
-
- list_add_tail(&psta->list, get_list_head(&pstapriv->free_sta_queue));
-
- psta++;
- }
-
- pstapriv->sta_dz_bitmap = 0;
- pstapriv->tim_bitmap = 0;
-
- INIT_LIST_HEAD(&pstapriv->asoc_list);
- INIT_LIST_HEAD(&pstapriv->auth_list);
- spin_lock_init(&pstapriv->asoc_list_lock);
- spin_lock_init(&pstapriv->auth_list_lock);
- pstapriv->asoc_list_cnt = 0;
- pstapriv->auth_list_cnt = 0;
-
- pstapriv->auth_to = 3; /* 3*2 = 6 sec */
- pstapriv->assoc_to = 3;
- pstapriv->expire_to = 3; /* 3*2 = 6 sec */
- pstapriv->max_num_sta = NUM_STA;
-
- return 0;
-}
-
-inline int rtw_stainfo_offset(struct sta_priv *stapriv, struct sta_info *sta)
-{
- return (((u8 *)sta) - stapriv->pstainfo_buf) / sizeof(struct sta_info);
-}
-
-inline struct sta_info *rtw_get_stainfo_by_offset(struct sta_priv *stapriv, int offset)
-{
- return (struct sta_info *)(stapriv->pstainfo_buf + offset * sizeof(struct sta_info));
-}
-
-void _rtw_free_sta_priv(struct sta_priv *pstapriv)
-{
- struct list_head *phead, *plist;
- struct sta_info *psta = NULL;
- struct recv_reorder_ctrl *preorder_ctrl;
- int index;
-
- if (pstapriv) {
- /* delete all reordering_ctrl_timer */
- spin_lock_bh(&pstapriv->sta_hash_lock);
- for (index = 0; index < NUM_STA; index++) {
- phead = &pstapriv->sta_hash[index];
- plist = phead->next;
-
- while (phead != plist) {
- int i;
- psta = container_of(plist, struct sta_info, hash_list);
- plist = plist->next;
-
- for (i = 0; i < 16; i++) {
- preorder_ctrl = &psta->recvreorder_ctrl[i];
- _cancel_timer_ex(&preorder_ctrl->reordering_ctrl_timer);
- }
- }
- }
- spin_unlock_bh(&pstapriv->sta_hash_lock);
- /*===============================*/
-
- vfree(pstapriv->pallocated_stainfo_buf);
- }
-}
-
-static void _rtw_reordering_ctrl_timeout_handler(struct timer_list *t)
-{
- struct recv_reorder_ctrl *preorder_ctrl;
-
- preorder_ctrl = from_timer(preorder_ctrl, t, reordering_ctrl_timer);
- rtw_reordering_ctrl_timeout_handler(preorder_ctrl);
-}
-
-static void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl)
-{
- timer_setup(&preorder_ctrl->reordering_ctrl_timer, _rtw_reordering_ctrl_timeout_handler, 0);
-}
-
-static void _addba_timer_hdl(struct timer_list *t)
-{
- struct sta_info *psta = from_timer(psta, t, addba_retry_timer);
-
- addba_timer_hdl(psta);
-}
-
-static void init_addba_retry_timer(struct adapter *padapter, struct sta_info *psta)
-{
- timer_setup(&psta->addba_retry_timer, _addba_timer_hdl, 0);
-}
-
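-/* Take a sta_info entry from the free queue, link it into the hash list */
-/* for hwaddr and initialize its timers and rx-reordering state. */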
-struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
-{
- s32 index;
- struct list_head *phash_list;
- struct sta_info *psta;
- struct __queue *pfree_sta_queue;
- struct recv_reorder_ctrl *preorder_ctrl;
- int i = 0;
- u16 wRxSeqInitialValue = 0xffff;
-
- pfree_sta_queue = &pstapriv->free_sta_queue;
-
- spin_lock_bh(&pfree_sta_queue->lock);
-
- if (list_empty(&pfree_sta_queue->queue)) {
- spin_unlock_bh(&pfree_sta_queue->lock);
- psta = NULL;
- } else {
- psta = container_of((&pfree_sta_queue->queue)->next, struct sta_info, list);
- list_del_init(&psta->list);
- spin_unlock_bh(&pfree_sta_queue->lock);
- _rtw_init_stainfo(psta);
- memcpy(psta->hwaddr, hwaddr, ETH_ALEN);
- index = wifi_mac_hash(hwaddr);
- if (index >= NUM_STA) {
- psta = NULL;
- goto exit;
- }
- phash_list = &pstapriv->sta_hash[index];
-
- spin_lock_bh(&pstapriv->sta_hash_lock);
-
- list_add_tail(&psta->hash_list, phash_list);
-
- pstapriv->asoc_sta_count++;
-
- spin_unlock_bh(&pstapriv->sta_hash_lock);
-
-/* Commented by Albert 2009/08/13 */
-/* For the SMC router, the sequence number of the first packet of the WPS handshake will be 0. */
-/* In that case the packet would be dropped by the recv_decache function if 0x00 were used as the default value of tid_rxseq. */
-/* So we initialize tid_rxseq to 0xffff. */
-
- for (i = 0; i < 16; i++)
- memcpy(&psta->sta_recvpriv.rxcache.tid_rxseq[i], &wRxSeqInitialValue, 2);
-
- init_addba_retry_timer(pstapriv->padapter, psta);
-
- /* for A-MPDU Rx reordering buffer control */
- for (i = 0; i < 16; i++) {
- preorder_ctrl = &psta->recvreorder_ctrl[i];
-
- preorder_ctrl->padapter = pstapriv->padapter;
-
- preorder_ctrl->enable = false;
-
- preorder_ctrl->indicate_seq = 0xffff;
- preorder_ctrl->wend_b = 0xffff;
- preorder_ctrl->wsize_b = 64;/* 64; */
-
- rtw_init_queue(&preorder_ctrl->pending_recvframe_queue);
-
- rtw_init_recv_timer(preorder_ctrl);
- }
-
- /* init for DM */
- psta->rssi_stat.UndecoratedSmoothedPWDB = (-1);
- psta->rssi_stat.UndecoratedSmoothedCCK = (-1);
-
- /* init for the sequence number of received management frame */
- psta->RxMgmtFrameSeqNum = 0xffff;
- }
-
-exit:
-
- return psta;
-}
-
-/* using pstapriv->sta_hash_lock to protect */
-void rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
-{
- int i;
- struct __queue *pfree_sta_queue;
- struct recv_reorder_ctrl *preorder_ctrl;
- struct sta_xmit_priv *pstaxmitpriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if (!psta)
- return;
-
- pfree_sta_queue = &pstapriv->free_sta_queue;
-
- pstaxmitpriv = &psta->sta_xmitpriv;
-
- spin_lock_bh(&pxmitpriv->lock);
-
- rtw_free_xmitframe_list(pxmitpriv, get_list_head(&psta->sleep_q));
- psta->sleepq_len = 0;
-
- rtw_free_xmitframe_list(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending);
-
- list_del_init(&pstaxmitpriv->vo_q.tx_pending);
-
- rtw_free_xmitframe_list(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending);
-
- list_del_init(&pstaxmitpriv->vi_q.tx_pending);
-
- rtw_free_xmitframe_list(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending);
-
- list_del_init(&pstaxmitpriv->bk_q.tx_pending);
-
- rtw_free_xmitframe_list(pxmitpriv, &pstaxmitpriv->be_q.sta_pending);
-
- list_del_init(&pstaxmitpriv->be_q.tx_pending);
-
- spin_unlock_bh(&pxmitpriv->lock);
-
- list_del_init(&psta->hash_list);
- pstapriv->asoc_sta_count--;
-
- /* re-init sta_info; 20061114 */
- _rtw_init_sta_xmit_priv(&psta->sta_xmitpriv);
- _rtw_init_sta_recv_priv(&psta->sta_recvpriv);
-
- _cancel_timer_ex(&psta->addba_retry_timer);
-
- /* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */
- for (i = 0; i < 16 ; i++) {
- struct list_head *phead, *plist;
- struct recv_frame *prframe;
- struct __queue *ppending_recvframe_queue;
- struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
-
- preorder_ctrl = &psta->recvreorder_ctrl[i];
-
- _cancel_timer_ex(&preorder_ctrl->reordering_ctrl_timer);
-
- ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
-
- spin_lock_bh(&ppending_recvframe_queue->lock);
-
- phead = get_list_head(ppending_recvframe_queue);
- plist = phead->next;
-
- while (!list_empty(phead)) {
- prframe = container_of(plist, struct recv_frame, list);
-
- plist = plist->next;
-
- list_del_init(&prframe->list);
-
- rtw_free_recvframe(prframe, pfree_recv_queue);
- }
-
- spin_unlock_bh(&ppending_recvframe_queue->lock);
- }
-
- if (!(psta->state & WIFI_AP_STATE))
- rtl8188e_SetHalODMVar(padapter, psta, false);
-
- spin_lock_bh(&pstapriv->auth_list_lock);
- if (!list_empty(&psta->auth_list)) {
- list_del_init(&psta->auth_list);
- pstapriv->auth_list_cnt--;
- }
- spin_unlock_bh(&pstapriv->auth_list_lock);
-
- psta->expire_to = 0;
-
- psta->sleepq_ac_len = 0;
- psta->qos_info = 0;
-
- psta->max_sp_len = 0;
- psta->uapsd_bk = 0;
- psta->uapsd_be = 0;
- psta->uapsd_vi = 0;
- psta->uapsd_vo = 0;
- psta->has_legacy_ac = 0;
-
- pstapriv->sta_dz_bitmap &= ~BIT(psta->aid);
- pstapriv->tim_bitmap &= ~BIT(psta->aid);
-
- if ((psta->aid > 0) && (pstapriv->sta_aid[psta->aid - 1] == psta)) {
- pstapriv->sta_aid[psta->aid - 1] = NULL;
- psta->aid = 0;
- }
-
- psta->under_exist_checking = 0;
-
- spin_lock_bh(&pfree_sta_queue->lock);
- list_add_tail(&psta->list, get_list_head(pfree_sta_queue));
- spin_unlock_bh(&pfree_sta_queue->lock);
-}
-
-/* free all stainfo which in sta_hash[all] */
-void rtw_free_all_stainfo(struct adapter *padapter)
-{
- struct list_head *plist, *phead;
- s32 index;
- struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct sta_info *pbcmc_stainfo = rtw_get_bcmc_stainfo(padapter);
-
- if (pstapriv->asoc_sta_count == 1)
- return;
-
- spin_lock_bh(&pstapriv->sta_hash_lock);
-
- for (index = 0; index < NUM_STA; index++) {
- phead = &pstapriv->sta_hash[index];
- plist = phead->next;
-
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, hash_list);
-
- plist = plist->next;
-
- if (pbcmc_stainfo != psta)
- rtw_free_stainfo(padapter, psta);
- }
- }
- spin_unlock_bh(&pstapriv->sta_hash_lock);
-}
-
-/* any station allocated can be searched by hash list */
-struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
-{
- struct sta_info *ploop, *psta = NULL;
- u32 index;
- u8 *addr;
- u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
- if (!hwaddr)
- return NULL;
-
- if (is_multicast_ether_addr(hwaddr))
- addr = bc_addr;
- else
- addr = hwaddr;
-
- index = wifi_mac_hash(addr);
-
- spin_lock_bh(&pstapriv->sta_hash_lock);
-
- list_for_each_entry(ploop, &pstapriv->sta_hash[index], hash_list) {
- if (!memcmp(ploop->hwaddr, addr, ETH_ALEN)) {
- psta = ploop;
- break;
- }
- }
-
- spin_unlock_bh(&pstapriv->sta_hash_lock);
-
- return psta;
-}
-
-u32 rtw_init_bcmc_stainfo(struct adapter *padapter)
-{
- struct sta_info *psta;
- u32 res = _SUCCESS;
- unsigned char bcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- psta = rtw_alloc_stainfo(pstapriv, bcast_addr);
-
- if (!psta) {
- res = _FAIL;
- goto exit;
- }
-
- /* default broadcast & multicast use macid 1 */
- psta->mac_id = 1;
-
-exit:
-
- return res;
-}
-
-struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter)
-{
- struct sta_info *psta;
- struct sta_priv *pstapriv = &padapter->stapriv;
- u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
- psta = rtw_get_stainfo(pstapriv, bc_addr);
-
- return psta;
-}
-
-u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
-{
- u8 res = true;
- struct list_head *plist, *phead;
- struct rtw_wlan_acl_node *paclnode;
- u8 match = false;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
- struct __queue *pacl_node_q = &pacl_list->acl_node_q;
-
- spin_lock_bh(&pacl_node_q->lock);
- phead = get_list_head(pacl_node_q);
- plist = phead->next;
- while (phead != plist) {
- paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
- plist = plist->next;
-
- if (!memcmp(paclnode->addr, mac_addr, ETH_ALEN)) {
- if (paclnode->valid) {
- match = true;
- break;
- }
- }
- }
- spin_unlock_bh(&pacl_node_q->lock);
-
- if (pacl_list->mode == 1)/* accept unless in deny list */
- res = !match;
- else if (pacl_list->mode == 2)/* deny unless in accept list */
- res = match;
- else
- res = true;
-
- return res;
-}
diff --git a/drivers/staging/r8188eu/core/rtw_wlan_util.c b/drivers/staging/r8188eu/core/rtw_wlan_util.c
deleted file mode 100644
index f1ebb5358cb9..000000000000
--- a/drivers/staging/r8188eu/core/rtw_wlan_util.c
+++ /dev/null
@@ -1,1551 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#define _RTW_WLAN_UTIL_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/wifi.h"
-
-static unsigned char ARTHEROS_OUI1[] = {0x00, 0x03, 0x7f};
-static unsigned char ARTHEROS_OUI2[] = {0x00, 0x13, 0x74};
-
-static unsigned char BROADCOM_OUI1[] = {0x00, 0x10, 0x18};
-static unsigned char BROADCOM_OUI2[] = {0x00, 0x0a, 0xf7};
-
-static unsigned char CISCO_OUI[] = {0x00, 0x40, 0x96};
-static unsigned char MARVELL_OUI[] = {0x00, 0x50, 0x43};
-static unsigned char RALINK_OUI[] = {0x00, 0x0c, 0x43};
-static unsigned char REALTEK_OUI[] = {0x00, 0xe0, 0x4c};
-static unsigned char AIRGOCAP_OUI[] = {0x00, 0x0a, 0xf5};
-static unsigned char EPIGRAM_OUI[] = {0x00, 0x90, 0x4c};
-
-unsigned char REALTEK_96B_IE[] = {0x00, 0xe0, 0x4c, 0x02, 0x01, 0x20};
-
-#define R2T_PHY_DELAY (0)
-
-/* define WAIT_FOR_BCN_TO_M (3000) */
-#define WAIT_FOR_BCN_TO_MIN (6000)
-#define WAIT_FOR_BCN_TO_MAX (20000)
-
-static u8 rtw_basic_rate_cck[4] = {
- IEEE80211_CCK_RATE_1MB | IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_2MB | IEEE80211_BASIC_RATE_MASK,
- IEEE80211_CCK_RATE_5MB | IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_11MB | IEEE80211_BASIC_RATE_MASK
-};
-
-static u8 rtw_basic_rate_ofdm[3] = {
- IEEE80211_OFDM_RATE_6MB | IEEE80211_BASIC_RATE_MASK, IEEE80211_OFDM_RATE_12MB | IEEE80211_BASIC_RATE_MASK,
- IEEE80211_OFDM_RATE_24MB | IEEE80211_BASIC_RATE_MASK
-};
-
-static u8 rtw_basic_rate_mix[7] = {
- IEEE80211_CCK_RATE_1MB | IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_2MB | IEEE80211_BASIC_RATE_MASK,
- IEEE80211_CCK_RATE_5MB | IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_11MB | IEEE80211_BASIC_RATE_MASK,
- IEEE80211_OFDM_RATE_6MB | IEEE80211_BASIC_RATE_MASK, IEEE80211_OFDM_RATE_12MB | IEEE80211_BASIC_RATE_MASK,
- IEEE80211_OFDM_RATE_24MB | IEEE80211_BASIC_RATE_MASK
-};
-
-bool cckrates_included(unsigned char *rate, int ratelen)
-{
- int i;
-
- for (i = 0; i < ratelen; i++) {
- if ((((rate[i]) & 0x7f) == 2) || (((rate[i]) & 0x7f) == 4) ||
- (((rate[i]) & 0x7f) == 11) || (((rate[i]) & 0x7f) == 22))
- return true;
- }
- return false;
-}
-
-bool cckratesonly_included(unsigned char *rate, int ratelen)
-{
- int i;
-
- for (i = 0; i < ratelen; i++) {
- if ((((rate[i]) & 0x7f) != 2) && (((rate[i]) & 0x7f) != 4) &&
- (((rate[i]) & 0x7f) != 11) && (((rate[i]) & 0x7f) != 22))
- return false;
- }
-
- return true;
-}
-
-unsigned char networktype_to_raid(unsigned char network_type)
-{
- unsigned char raid;
-
- switch (network_type) {
- case WIRELESS_11B:
- raid = RATR_INX_WIRELESS_B;
- break;
- case WIRELESS_11G:
- raid = RATR_INX_WIRELESS_G;
- break;
- case WIRELESS_11BG:
- raid = RATR_INX_WIRELESS_GB;
- break;
- case WIRELESS_11_24N:
- raid = RATR_INX_WIRELESS_N;
- break;
- case WIRELESS_11G_24N:
- raid = RATR_INX_WIRELESS_NG;
- break;
- case WIRELESS_11BG_24N:
- raid = RATR_INX_WIRELESS_NGB;
- break;
- default:
- raid = RATR_INX_WIRELESS_GB;
- break;
- }
- return raid;
-}
-
-u8 judge_network_type(struct adapter *padapter, unsigned char *rate, int ratelen)
-{
- u8 network_type = 0;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (pmlmeext->cur_channel > 14) {
- network_type |= WIRELESS_INVALID;
- } else {
- if (pmlmeinfo->HT_enable)
- network_type = WIRELESS_11_24N;
-
- if (cckratesonly_included(rate, ratelen))
- network_type |= WIRELESS_11B;
- else if (cckrates_included(rate, ratelen))
- network_type |= WIRELESS_11BG;
- else
- network_type |= WIRELESS_11G;
- }
- return network_type;
-}
-
-static unsigned char ratetbl_val_2wifirate(unsigned char rate)
-{
- unsigned char val = 0;
-
- switch (rate & 0x7f) {
- case 0:
- val = IEEE80211_CCK_RATE_1MB;
- break;
- case 1:
- val = IEEE80211_CCK_RATE_2MB;
- break;
- case 2:
- val = IEEE80211_CCK_RATE_5MB;
- break;
- case 3:
- val = IEEE80211_CCK_RATE_11MB;
- break;
- case 4:
- val = IEEE80211_OFDM_RATE_6MB;
- break;
- case 5:
- val = IEEE80211_OFDM_RATE_9MB;
- break;
- case 6:
- val = IEEE80211_OFDM_RATE_12MB;
- break;
- case 7:
- val = IEEE80211_OFDM_RATE_18MB;
- break;
- case 8:
- val = IEEE80211_OFDM_RATE_24MB;
- break;
- case 9:
- val = IEEE80211_OFDM_RATE_36MB;
- break;
- case 10:
- val = IEEE80211_OFDM_RATE_48MB;
- break;
- case 11:
- val = IEEE80211_OFDM_RATE_54MB;
- break;
- }
- return val;
-}
-
-static bool is_basicrate(struct adapter *padapter, unsigned char rate)
-{
- int i;
- unsigned char val;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- for (i = 0; i < NumRates; i++) {
- val = pmlmeext->basicrate[i];
-
- if ((val != 0xff) && (val != 0xfe)) {
- if (rate == ratetbl_val_2wifirate(val))
- return true;
- }
- }
- return false;
-}
-
-static unsigned int ratetbl2rateset(struct adapter *padapter, unsigned char *rateset)
-{
- int i;
- unsigned char rate;
- unsigned int len = 0;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- for (i = 0; i < NumRates; i++) {
- rate = pmlmeext->datarate[i];
-
- switch (rate) {
- case 0xff:
- return len;
- case 0xfe:
- continue;
- default:
- rate = ratetbl_val_2wifirate(rate);
-
- if (is_basicrate(padapter, rate))
- rate |= IEEE80211_BASIC_RATE_MASK;
-
- rateset[len] = rate;
- len++;
- break;
- }
- }
- return len;
-}
-
-void get_rate_set(struct adapter *padapter, unsigned char *pbssrate, int *bssrate_len)
-{
- unsigned char supportedrates[NumRates];
-
- memset(supportedrates, 0, NumRates);
- *bssrate_len = ratetbl2rateset(padapter, supportedrates);
- memcpy(pbssrate, supportedrates, *bssrate_len);
-}
-
-void Save_DM_Func_Flag(struct adapter *padapter)
-{
- struct hal_data_8188e *haldata = &padapter->haldata;
- struct odm_dm_struct *odmpriv = &haldata->odmpriv;
-
- odmpriv->BK_SupportAbility = odmpriv->SupportAbility;
-}
-
-void Restore_DM_Func_Flag(struct adapter *padapter)
-{
- struct hal_data_8188e *haldata = &padapter->haldata;
- struct odm_dm_struct *odmpriv = &haldata->odmpriv;
-
- odmpriv->SupportAbility = odmpriv->BK_SupportAbility;
-}
-
-void Set_MSR(struct adapter *padapter, u8 type)
-{
- u8 val8;
- int res;
-
- res = rtw_read8(padapter, MSR, &val8);
- if (res)
- return;
-
- val8 &= 0x0c;
- val8 |= type;
- rtw_write8(padapter, MSR, val8);
-}
-
-inline u8 rtw_get_oper_ch(struct adapter *adapter)
-{
- return adapter->mlmeextpriv.oper_channel;
-}
-
-inline void rtw_set_oper_ch(struct adapter *adapter, u8 ch)
-{
- adapter->mlmeextpriv.oper_channel = ch;
-}
-
-inline void rtw_set_oper_bw(struct adapter *adapter, u8 bw)
-{
- adapter->mlmeextpriv.oper_bwmode = bw;
-}
-
-inline void rtw_set_oper_choffset(struct adapter *adapter, u8 offset)
-{
- adapter->mlmeextpriv.oper_ch_offset = offset;
-}
-
-void SelectChannel(struct adapter *padapter, unsigned char channel)
-{
- /* saved channel info */
- rtw_set_oper_ch(padapter, channel);
- PHY_SwChnl8188E(padapter, channel);
-}
-
-void SetBWMode(struct adapter *padapter, unsigned short bwmode,
- unsigned char channel_offset)
-{
- /* saved bw info */
- rtw_set_oper_bw(padapter, bwmode);
- rtw_set_oper_choffset(padapter, channel_offset);
-
- PHY_SetBWMode8188E(padapter, (enum ht_channel_width)bwmode, channel_offset);
-}
-
-void set_channel_bwmode(struct adapter *padapter, unsigned char channel, unsigned char channel_offset, unsigned short bwmode)
-{
- u8 center_ch;
-
- if ((bwmode == HT_CHANNEL_WIDTH_20) ||
- (channel_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)) {
- /* SelectChannel(padapter, channel); */
- center_ch = channel;
- } else {
- /* switch to the proper channel */
- if (channel_offset == HAL_PRIME_CHNL_OFFSET_LOWER) {
- /* SelectChannel(padapter, channel + 2); */
- center_ch = channel + 2;
- } else {
- /* SelectChannel(padapter, channel - 2); */
- center_ch = channel - 2;
- }
- }
-
- /* set Channel */
- /* saved channel/bw info */
- rtw_set_oper_ch(padapter, channel);
- rtw_set_oper_bw(padapter, bwmode);
- rtw_set_oper_choffset(padapter, channel_offset);
-
- PHY_SwChnl8188E(padapter, center_ch); /* set center channel */
- SetBWMode(padapter, bwmode, channel_offset);
-}
-
-__inline u8 *get_my_bssid(struct wlan_bssid_ex *pnetwork)
-{
- return pnetwork->MacAddress;
-}
-
-u16 get_beacon_interval(struct wlan_bssid_ex *bss)
-{
- __le16 val;
- memcpy((unsigned char *)&val, rtw_get_beacon_interval_from_ie(bss->IEs), 2);
-
- return le16_to_cpu(val);
-}
-
-bool r8188eu_is_client_associated_to_ap(struct adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext;
- struct mlme_ext_info *pmlmeinfo;
-
- if (!padapter)
- return false;
-
- pmlmeext = &padapter->mlmeextpriv;
- pmlmeinfo = &pmlmeext->mlmext_info;
-
- if ((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) && ((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE))
- return true;
-
- return false;
-}
-
-bool r8188eu_is_client_associated_to_ibss(struct adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if ((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) && ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE))
- return true;
-
- return false;
-}
-
-bool r8188eu_is_ibss_empty(struct adapter *padapter)
-{
- unsigned int i;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- for (i = IBSS_START_MAC_ID; i < NUM_STA; i++) {
- if (pmlmeinfo->FW_sta_info[i].status == 1)
- return false;
- }
- return true;
-}
-
-unsigned int decide_wait_for_beacon_timeout(unsigned int bcn_interval)
-{
- if ((bcn_interval << 2) < WAIT_FOR_BCN_TO_MIN)
- return WAIT_FOR_BCN_TO_MIN;
- else if ((bcn_interval << 2) > WAIT_FOR_BCN_TO_MAX)
- return WAIT_FOR_BCN_TO_MAX;
- else
- return bcn_interval << 2;
-}
-
-void invalidate_cam_all(struct adapter *padapter)
-{
- rtw_write32(padapter, RWCAM, BIT(31) | BIT(30));
-}
-
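-/* Write one hardware CAM (key) entry: the control word, MAC address and */
-/* 128-bit key are programmed as six 32-bit words via WCAMI/RWCAM. */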
-void write_cam(struct adapter *padapter, u8 entry, u16 ctrl, u8 *mac, u8 *key)
-{
- unsigned int i, val, addr;
- int j;
- u32 cam_val[2];
-
- addr = entry << 3;
-
- for (j = 5; j >= 0; j--) {
- switch (j) {
- case 0:
- val = (ctrl | (mac[0] << 16) | (mac[1] << 24));
- break;
- case 1:
- val = (mac[2] | (mac[3] << 8) | (mac[4] << 16) | (mac[5] << 24));
- break;
- default:
- i = (j - 2) << 2;
- val = (key[i] | (key[i + 1] << 8) | (key[i + 2] << 16) | (key[i + 3] << 24));
- break;
- }
-
- cam_val[0] = val;
- cam_val[1] = addr + (unsigned int)j;
-
- rtw_write32(padapter, WCAMI, cam_val[0]);
- rtw_write32(padapter, RWCAM, CAM_POLLINIG | CAM_WRITE | cam_val[1]);
- }
-}
-
-void clear_cam_entry(struct adapter *padapter, u8 entry)
-{
- unsigned char null_sta[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
- unsigned char null_key[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-
- write_cam(padapter, entry, 0, null_sta, null_key);
-}
-
-int allocate_fw_sta_entry(struct adapter *padapter)
-{
- unsigned int mac_id;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- for (mac_id = IBSS_START_MAC_ID; mac_id < NUM_STA; mac_id++) {
- if (pmlmeinfo->FW_sta_info[mac_id].status == 0) {
- pmlmeinfo->FW_sta_info[mac_id].status = 1;
- pmlmeinfo->FW_sta_info[mac_id].retry = 0;
- break;
- }
- }
-
- return mac_id;
-}
-
-void flush_all_cam_entry(struct adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- rtw_write32(padapter, RWCAM, BIT(31) | BIT(30));
-
- memset((u8 *)(pmlmeinfo->FW_sta_info), 0, sizeof(pmlmeinfo->FW_sta_info));
-}
-
-int WMM_param_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
-{
- /* struct registry_priv *pregpriv = &padapter->registrypriv; */
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (pmlmepriv->qospriv.qos_option == 0) {
- pmlmeinfo->WMM_enable = 0;
- return _FAIL;
- }
-
- pmlmeinfo->WMM_enable = 1;
- memcpy(&pmlmeinfo->WMM_param, pIE->data + 6, sizeof(struct WMM_para_element));
- return true;
-}
-
-static void set_acm_ctrl(struct adapter *adapter, u8 acm_mask)
-{
- u8 acmctrl;
- int res = rtw_read8(adapter, REG_ACMHWCTRL, &acmctrl);
-
- if (res)
- return;
-
- if (acm_mask > 1)
- acmctrl = acmctrl | 0x1;
-
- if (acm_mask & BIT(3))
- acmctrl |= ACMHW_VOQEN;
- else
- acmctrl &= (~ACMHW_VOQEN);
-
- if (acm_mask & BIT(2))
- acmctrl |= ACMHW_VIQEN;
- else
- acmctrl &= (~ACMHW_VIQEN);
-
- if (acm_mask & BIT(1))
- acmctrl |= ACMHW_BEQEN;
- else
- acmctrl &= (~ACMHW_BEQEN);
-
- rtw_write8(adapter, REG_ACMHWCTRL, acmctrl);
-}
-
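-/* Apply the per-AC EDCA parameters (AIFS, ECWmin/max, TXOP) carried in the */
-/* WMM IE of the association response, build the ACM mask and set the WMM */
-/* queue order. */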
-void WMMOnAssocRsp(struct adapter *padapter)
-{
- u8 ACI, ACM, AIFS, ECWMin, ECWMax, aSifsTime;
- u8 acm_mask;
- u16 TXOP;
- u32 acParm, i;
- u32 edca[4], inx[4];
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct registry_priv *pregpriv = &padapter->registrypriv;
- struct hal_data_8188e *haldata = &padapter->haldata;
-
- if (pmlmeinfo->WMM_enable == 0) {
- padapter->mlmepriv.acm_mask = 0;
- return;
- }
-
- acm_mask = 0;
-
- if (pmlmeext->cur_wireless_mode == WIRELESS_11B)
- aSifsTime = 10;
- else
- aSifsTime = 16;
-
- for (i = 0; i < 4; i++) {
- ACI = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN >> 5) & 0x03;
- ACM = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN >> 4) & 0x01;
-
- /* AIFS = AIFSN * slot time + SIFS - r2t phy delay */
- AIFS = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN & 0x0f) * pmlmeinfo->slotTime + aSifsTime;
-
- ECWMin = (pmlmeinfo->WMM_param.ac_param[i].CW & 0x0f);
- ECWMax = (pmlmeinfo->WMM_param.ac_param[i].CW & 0xf0) >> 4;
- TXOP = le16_to_cpu(pmlmeinfo->WMM_param.ac_param[i].TXOP_limit);
-
- acParm = AIFS | (ECWMin << 8) | (ECWMax << 12) | (TXOP << 16);
-
- switch (ACI) {
- case 0x0:
- haldata->AcParam_BE = acParm;
- rtw_write32(padapter, REG_EDCA_BE_PARAM, acParm);
- acm_mask |= (ACM ? BIT(1) : 0);
- edca[XMIT_BE_QUEUE] = acParm;
- break;
- case 0x1:
- rtw_write32(padapter, REG_EDCA_BK_PARAM, acParm);
- edca[XMIT_BK_QUEUE] = acParm;
- break;
- case 0x2:
- rtw_write32(padapter, REG_EDCA_VI_PARAM, acParm);
- acm_mask |= (ACM ? BIT(2) : 0);
- edca[XMIT_VI_QUEUE] = acParm;
- break;
- case 0x3:
- rtw_write32(padapter, REG_EDCA_VO_PARAM, acParm);
- acm_mask |= (ACM ? BIT(3) : 0);
- edca[XMIT_VO_QUEUE] = acParm;
- break;
- }
- }
-
- if (padapter->registrypriv.acm_method == 1)
- set_acm_ctrl(padapter, acm_mask);
- else
- padapter->mlmepriv.acm_mask = acm_mask;
-
- inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3;
-
- if (pregpriv->wifi_spec == 1) {
- u32 j, change_inx = false;
-
-		/* entry index: 0->vo, 1->vi, 2->be, 3->bk. */
- for (i = 0; i < 4; i++) {
- for (j = i + 1; j < 4; j++) {
- /* compare CW and AIFS */
- if ((edca[j] & 0xFFFF) < (edca[i] & 0xFFFF)) {
- change_inx = true;
- } else if ((edca[j] & 0xFFFF) == (edca[i] & 0xFFFF)) {
- /* compare TXOP */
- if ((edca[j] >> 16) > (edca[i] >> 16))
- change_inx = true;
- }
-
- if (change_inx) {
- swap(edca[i], edca[j]);
- swap(inx[i], inx[j]);
-
- change_inx = false;
- }
- }
- }
- }
-
- for (i = 0; i < 4; i++)
- pxmitpriv->wmm_para_seq[i] = inx[i];
-}
-
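-/* Check the HT info element from a beacon for a bandwidth or secondary- */
-/* channel change and update the cached bwmode, channel offset and the */
-/* AP's sta_info accordingly. */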
-static void bwmode_update_check(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
-{
- unsigned char new_bwmode;
- unsigned char new_ch_offset;
- struct HT_info_element *pHT_info;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct registry_priv *pregistrypriv = &padapter->registrypriv;
- struct ht_priv *phtpriv = &pmlmepriv->htpriv;
-
- if (!pIE)
- return;
-
- if (!phtpriv)
- return;
-
- if (pIE->Length > sizeof(struct HT_info_element))
- return;
-
- pHT_info = (struct HT_info_element *)pIE->data;
-
- if ((pHT_info->infos[0] & BIT(2)) && pregistrypriv->cbw40_enable) {
- new_bwmode = HT_CHANNEL_WIDTH_40;
-
- switch (pHT_info->infos[0] & 0x3) {
- case 1:
- new_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
- break;
- case 3:
- new_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
- break;
- default:
- new_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- break;
- }
- } else {
- new_bwmode = HT_CHANNEL_WIDTH_20;
- new_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- }
-
- if ((new_bwmode != pmlmeext->cur_bwmode) ||
- (new_ch_offset != pmlmeext->cur_ch_offset)) {
- pmlmeinfo->bwmode_updated = true;
-
- pmlmeext->cur_bwmode = new_bwmode;
- pmlmeext->cur_ch_offset = new_ch_offset;
-
- /* update HT info also */
- HT_info_handler(padapter, pIE);
- } else {
- pmlmeinfo->bwmode_updated = false;
- }
-
- if (pmlmeinfo->bwmode_updated) {
- struct sta_info *psta;
- struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- /* set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode); */
-
- /* update ap's stainfo */
- psta = rtw_get_stainfo(pstapriv, cur_network->MacAddress);
- if (psta) {
- struct ht_priv *phtpriv_sta = &psta->htpriv;
-
- if (phtpriv_sta->ht_option) {
- /* bwmode */
- phtpriv_sta->bwmode = pmlmeext->cur_bwmode;
- phtpriv_sta->ch_offset = pmlmeext->cur_ch_offset;
- } else {
- phtpriv_sta->bwmode = HT_CHANNEL_WIDTH_20;
- phtpriv_sta->ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- }
- }
- }
-}
-
-void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
-{
- unsigned int i;
- u8 max_AMPDU_len, min_MPDU_spacing;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct ht_priv *phtpriv = &pmlmepriv->htpriv;
-
- if (!pIE)
- return;
-
- if (!phtpriv->ht_option)
- return;
-
- pmlmeinfo->HT_caps_enable = 1;
-
- for (i = 0; i < (pIE->Length); i++) {
- if (i != 2) {
- /* Got the endian issue here. */
- pmlmeinfo->HT_caps.u.HT_cap[i] &= (pIE->data[i]);
- } else {
- /* modify from fw by Thomas 2010/11/17 */
- max_AMPDU_len = min(pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x3,
- pIE->data[i] & 0x3);
-
- min_MPDU_spacing = max(pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c,
- pIE->data[i] & 0x1c);
-
- pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para = max_AMPDU_len | min_MPDU_spacing;
- }
- }
-
- /* update the MCS rates */
- for (i = 0; i < 16; i++)
- pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_1R[i];
-}
-
-void HT_info_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct ht_priv *phtpriv = &pmlmepriv->htpriv;
-
- if (!pIE)
- return;
-
- if (!phtpriv->ht_option)
- return;
-
- if (pIE->Length > sizeof(struct HT_info_element))
- return;
-
- pmlmeinfo->HT_info_enable = 1;
- memcpy(&pmlmeinfo->HT_info, pIE->data, pIE->Length);
-}
-
-static void set_min_ampdu_spacing(struct adapter *adapter, u8 spacing)
-{
- u8 sec_spacing;
- int res;
-
- if (spacing <= 7) {
- switch (adapter->securitypriv.dot11PrivacyAlgrthm) {
- case _NO_PRIVACY_:
- case _AES_:
- sec_spacing = 0;
- break;
- case _WEP40_:
- case _WEP104_:
- case _TKIP_:
- case _TKIP_WTMIC_:
- sec_spacing = 6;
- break;
- default:
- sec_spacing = 7;
- break;
- }
-
- if (spacing < sec_spacing)
- spacing = sec_spacing;
-
- res = rtw_read8(adapter, REG_AMPDU_MIN_SPACE, &sec_spacing);
- if (res)
- return;
-
- rtw_write8(adapter, REG_AMPDU_MIN_SPACE,
- (sec_spacing & 0xf8) | spacing);
- }
-}
-
-static void set_ampdu_factor(struct adapter *adapter, u8 factor)
-{
- u8 RegToSet_Normal[4] = {0x41, 0xa8, 0x72, 0xb9};
- u8 FactorToSet;
- u8 *pRegToSet;
- u8 index = 0;
-
- pRegToSet = RegToSet_Normal; /* 0xb972a841; */
- FactorToSet = factor;
- if (FactorToSet <= 3) {
- FactorToSet = (1 << (FactorToSet + 2));
- if (FactorToSet > 0xf)
- FactorToSet = 0xf;
-
- for (index = 0; index < 4; index++) {
- if ((pRegToSet[index] & 0xf0) > (FactorToSet << 4))
- pRegToSet[index] = (pRegToSet[index] & 0x0f) | (FactorToSet << 4);
-
- if ((pRegToSet[index] & 0x0f) > FactorToSet)
- pRegToSet[index] = (pRegToSet[index] & 0xf0) | (FactorToSet);
-
- rtw_write8(adapter, (REG_AGGLEN_LMT + index), pRegToSet[index]);
- }
- }
-}
-
-void HTOnAssocRsp(struct adapter *padapter)
-{
- unsigned char max_AMPDU_len;
- unsigned char min_MPDU_spacing;
- /* struct registry_priv *pregpriv = &padapter->registrypriv; */
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if ((pmlmeinfo->HT_info_enable) && (pmlmeinfo->HT_caps_enable)) {
- pmlmeinfo->HT_enable = 1;
- } else {
- pmlmeinfo->HT_enable = 0;
- return;
- }
-
- /* handle A-MPDU parameter field */
-	/*
-	 * AMPDU_para [1:0]: Max AMPDU Len => 0: 8k, 1: 16k, 2: 32k, 3: 64k
-	 * AMPDU_para [4:2]: Min MPDU Start Spacing
-	 */
- max_AMPDU_len = pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x03;
-
- min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) >> 2;
-
- set_min_ampdu_spacing(padapter, min_MPDU_spacing);
-
- set_ampdu_factor(padapter, max_AMPDU_len);
-}
-
-void ERP_IE_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (pIE->Length > 1)
- return;
-
- pmlmeinfo->ERP_enable = 1;
- memcpy(&pmlmeinfo->ERP_IE, pIE->data, pIE->Length);
-}
-
-void VCS_update(struct adapter *padapter, struct sta_info *psta)
-{
- struct registry_priv *pregpriv = &padapter->registrypriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- switch (pregpriv->vrtl_carrier_sense) { /* 0:off 1:on 2:auto */
- case 0: /* off */
- psta->rtsen = 0;
- psta->cts2self = 0;
- break;
- case 1: /* on */
- if (pregpriv->vcs_type == 1) { /* 1:RTS/CTS 2:CTS to self */
- psta->rtsen = 1;
- psta->cts2self = 0;
- } else {
- psta->rtsen = 0;
- psta->cts2self = 1;
- }
- break;
- case 2: /* auto */
- default:
- if ((pmlmeinfo->ERP_enable) && (pmlmeinfo->ERP_IE & BIT(1))) {
- if (pregpriv->vcs_type == 1) {
- psta->rtsen = 1;
- psta->cts2self = 0;
- } else {
- psta->rtsen = 0;
- psta->cts2self = 1;
- }
- } else {
- psta->rtsen = 0;
- psta->cts2self = 0;
- }
- break;
- }
-}
-
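-/* Validate a received beacon against the cached network info: channel, */
-/* SSID, privacy bit and WPA/WPA2 cipher suites must still match, */
-/* otherwise _FAIL is returned. */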
-int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
-{
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)pframe;
- unsigned int len;
- unsigned char *p;
- unsigned short val16;
- struct wlan_network *cur_network = &Adapter->mlmepriv.cur_network;
- /* u8 wpa_ie[255], rsn_ie[255]; */
- u16 wpa_len = 0, rsn_len = 0;
- u8 encryp_protocol = 0;
- struct wlan_bssid_ex *bssid;
- int group_cipher = 0, pairwise_cipher = 0, is_8021x = 0;
- unsigned char *pbuf;
- u32 wpa_ielen = 0;
- u8 *pbssid = GetAddr3Ptr(pframe);
- u32 hidden_ssid = 0;
- struct HT_info_element *pht_info = NULL;
- struct ieee80211_ht_cap *pht_cap = NULL;
- u32 bcn_channel;
- unsigned short ht_cap_info;
- unsigned char ht_info_infos_0;
-
- if (!r8188eu_is_client_associated_to_ap(Adapter))
- return true;
-
- len = packet_len - sizeof(struct ieee80211_hdr_3addr);
-
- if (len > MAX_IE_SZ)
- return _FAIL;
-
- if (memcmp(cur_network->network.MacAddress, pbssid, 6))
- return true;
-
- bssid = kzalloc(sizeof(struct wlan_bssid_ex), GFP_ATOMIC);
- if (!bssid)
- return _FAIL;
-
- if (ieee80211_is_beacon(mgmt->frame_control))
- bssid->Reserved[0] = 1;
-
- bssid->Length = sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + len;
-
- /* below is to copy the information element */
- bssid->IELength = len;
- memcpy(bssid->IEs, (pframe + sizeof(struct ieee80211_hdr_3addr)), bssid->IELength);
-
- /* check bw and channel offset */
- /* parsing HT_CAP_IE */
- p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
- if (p && len > 0) {
- pht_cap = (struct ieee80211_ht_cap *)(p + 2);
- ht_cap_info = le16_to_cpu(pht_cap->cap_info);
- } else {
- ht_cap_info = 0;
- }
- /* parsing HT_INFO_IE */
- p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
- if (p && len > 0) {
- pht_info = (struct HT_info_element *)(p + 2);
- ht_info_infos_0 = pht_info->infos[0];
- } else {
- ht_info_infos_0 = 0;
- }
- if (ht_cap_info != cur_network->BcnInfo.ht_cap_info ||
- ((ht_info_infos_0 & 0x03) != (cur_network->BcnInfo.ht_info_infos_0 & 0x03))) {
- /* bcn_info_update */
- cur_network->BcnInfo.ht_cap_info = ht_cap_info;
- cur_network->BcnInfo.ht_info_infos_0 = ht_info_infos_0;
-		/* TODO: check whether the related BB registers need to be modified as well */
- /* goto _mismatch; */
- }
-
- /* Checking for channel */
- p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _DSSET_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
- if (p) {
- bcn_channel = *(p + 2);
-	} else { /* in 5G some APs do not carry a DSSET IE, so check the HT info IE for the channel */
- p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
- if (pht_info)
- bcn_channel = pht_info->primary_channel;
- else /* we don't find channel IE, so don't check it */
- bcn_channel = Adapter->mlmeextpriv.cur_channel;
- }
- if (bcn_channel != Adapter->mlmeextpriv.cur_channel)
- goto _mismatch;
-
- /* checking SSID */
- p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
- if (!p)
- hidden_ssid = true;
- else
- hidden_ssid = false;
-
- if (p && (!hidden_ssid && (*(p + 1)))) {
- memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
- bssid->Ssid.SsidLength = *(p + 1);
- } else {
- bssid->Ssid.SsidLength = 0;
- bssid->Ssid.Ssid[0] = '\0';
- }
-
- if (memcmp(bssid->Ssid.Ssid, cur_network->network.Ssid.Ssid, 32) ||
- bssid->Ssid.SsidLength != cur_network->network.Ssid.SsidLength) {
- /* not hidden ssid */
- if (bssid->Ssid.Ssid[0] != '\0' && bssid->Ssid.SsidLength != 0)
- goto _mismatch;
- }
-
- /* check encryption info */
- val16 = rtw_get_capability((struct wlan_bssid_ex *)bssid);
-
- if (val16 & BIT(4))
- bssid->Privacy = 1;
- else
- bssid->Privacy = 0;
-
- if (cur_network->network.Privacy != bssid->Privacy)
- goto _mismatch;
-
- rtw_get_sec_ie(bssid->IEs, bssid->IELength, NULL, &rsn_len, NULL, &wpa_len);
-
- if (rsn_len > 0) {
- encryp_protocol = ENCRYP_PROTOCOL_WPA2;
- } else if (wpa_len > 0) {
- encryp_protocol = ENCRYP_PROTOCOL_WPA;
- } else {
- if (bssid->Privacy)
- encryp_protocol = ENCRYP_PROTOCOL_WEP;
- }
-
- if (cur_network->BcnInfo.encryp_protocol != encryp_protocol)
- goto _mismatch;
-
- if (encryp_protocol == ENCRYP_PROTOCOL_WPA || encryp_protocol == ENCRYP_PROTOCOL_WPA2) {
- pbuf = rtw_get_wpa_ie(&bssid->IEs[12], &wpa_ielen, bssid->IELength - 12);
- if (pbuf && (wpa_ielen > 0)) {
- rtw_parse_wpa_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is_8021x);
- } else {
- pbuf = rtw_get_wpa2_ie(&bssid->IEs[12], &wpa_ielen, bssid->IELength - 12);
-
- if (pbuf && (wpa_ielen > 0))
- rtw_parse_wpa2_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is_8021x);
- }
-
- if (pairwise_cipher != cur_network->BcnInfo.pairwise_cipher ||
- group_cipher != cur_network->BcnInfo.group_cipher)
- goto _mismatch;
-
- if (is_8021x != cur_network->BcnInfo.is_8021x)
- goto _mismatch;
- }
-
- kfree(bssid);
-
- return _SUCCESS;
-
-_mismatch:
- kfree(bssid);
-
- return _FAIL;
-}
-
-void update_beacon_info(struct adapter *padapter, u8 *ie_ptr, uint ie_len, struct sta_info *psta)
-{
- unsigned int i;
- struct ndis_802_11_var_ie *pIE;
-
- for (i = 0; i < ie_len;) {
- pIE = (struct ndis_802_11_var_ie *)(ie_ptr + i);
-
- switch (pIE->ElementID) {
- case _HT_EXTRA_INFO_IE_: /* HT info */
- /* HT_info_handler(padapter, pIE); */
- bwmode_update_check(padapter, pIE);
- break;
- case _ERPINFO_IE_:
- ERP_IE_handler(padapter, pIE);
- VCS_update(padapter, psta);
- break;
- default:
- break;
- }
-
- i += (pIE->Length + 2);
- }
-}
-
-bool is_ap_in_tkip(struct adapter *padapter)
-{
- u32 i;
- struct ndis_802_11_var_ie *pIE;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
-
- if (rtw_get_capability((struct wlan_bssid_ex *)cur_network) & WLAN_CAPABILITY_PRIVACY) {
- for (i = sizeof(struct ndis_802_11_fixed_ie); i < pmlmeinfo->network.IELength;) {
- pIE = (struct ndis_802_11_var_ie *)(pmlmeinfo->network.IEs + i);
-
- switch (pIE->ElementID) {
- case _VENDOR_SPECIFIC_IE_:
- if ((!memcmp(pIE->data, RTW_WPA_OUI, 4)) && (!memcmp((pIE->data + 12), WPA_TKIP_CIPHER, 4)))
- return true;
- break;
- case _RSN_IE_2_:
- if (!memcmp((pIE->data + 8), RSN_TKIP_CIPHER, 4))
- return true;
- break;
- default:
- break;
- }
-
- i += (pIE->Length + 2);
- }
- return false;
- } else {
- return false;
- }
-}
-
-int wifirate2_ratetbl_inx(unsigned char rate)
-{
- int inx = 0;
- rate = rate & 0x7f;
-
- switch (rate) {
- case 54 * 2:
- inx = 11;
- break;
- case 48 * 2:
- inx = 10;
- break;
- case 36 * 2:
- inx = 9;
- break;
- case 24 * 2:
- inx = 8;
- break;
- case 18 * 2:
- inx = 7;
- break;
- case 12 * 2:
- inx = 6;
- break;
- case 9 * 2:
- inx = 5;
- break;
- case 6 * 2:
- inx = 4;
- break;
- case 11 * 2:
- inx = 3;
- break;
- case 11:
- inx = 2;
- break;
- case 2 * 2:
- inx = 1;
- break;
- case 1 * 2:
- inx = 0;
- break;
- }
- return inx;
-}
-
-unsigned int update_basic_rate(unsigned char *ptn, unsigned int ptn_sz)
-{
- unsigned int i, num_of_rate;
- unsigned int mask = 0;
-
- num_of_rate = (ptn_sz > NumRates) ? NumRates : ptn_sz;
-
- for (i = 0; i < num_of_rate; i++) {
- if ((*(ptn + i)) & 0x80)
- mask |= 0x1 << wifirate2_ratetbl_inx(*(ptn + i));
- }
- return mask;
-}
-
-unsigned int update_supported_rate(unsigned char *ptn, unsigned int ptn_sz)
-{
- unsigned int i, num_of_rate;
- unsigned int mask = 0;
-
- num_of_rate = (ptn_sz > NumRates) ? NumRates : ptn_sz;
-
- for (i = 0; i < num_of_rate; i++)
- mask |= 0x1 << wifirate2_ratetbl_inx(*(ptn + i));
- return mask;
-}
-
-unsigned int update_MSC_rate(struct HT_caps_element *pHT_caps)
-{
- unsigned int mask = 0;
-
- mask = ((pHT_caps->u.HT_cap_element.MCS_rate[0] << 12) | (pHT_caps->u.HT_cap_element.MCS_rate[1] << 20));
-
- return mask;
-}
-
-int support_short_GI(struct adapter *padapter, struct HT_caps_element *pHT_caps)
-{
- unsigned char bit_offset;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (!(pmlmeinfo->HT_enable))
- return _FAIL;
-
- if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_RALINK)
- return _FAIL;
-
- bit_offset = (pmlmeext->cur_bwmode & HT_CHANNEL_WIDTH_40) ? 6 : 5;
-
- if (__le16_to_cpu(pHT_caps->u.HT_cap_element.HT_caps_info) & (0x1 << bit_offset))
- return _SUCCESS;
- else
- return _FAIL;
-}
-
-unsigned char get_highest_rate_idx(u32 mask)
-{
- int i;
- unsigned char rate_idx = 0;
-
- for (i = 27; i >= 0; i--) {
- if (mask & BIT(i)) {
- rate_idx = i;
- break;
- }
- }
- return rate_idx;
-}
-
-void Update_RA_Entry(struct adapter *padapter, u32 mac_id)
-{
- rtw_hal_update_ra_mask(padapter, mac_id, 0);
-}
-
-static void enable_rate_adaptive(struct adapter *padapter, u32 mac_id)
-{
- Update_RA_Entry(padapter, mac_id);
-}
-
-void set_sta_rate(struct adapter *padapter, struct sta_info *psta)
-{
- /* rate adaptive */
- enable_rate_adaptive(padapter, psta->mac_id);
-}
-
-void rtw_set_basic_rate(struct adapter *adapter, u8 *rates)
-{
- u16 BrateCfg = 0;
- u8 RateIndex = 0;
- int res;
- u8 reg;
-
- /* 2007.01.16, by Emily */
- /* Select RRSR (in Legacy-OFDM and CCK) */
- /* For 8190, we select only 24M, 12M, 6M, 11M, 5.5M, 2M, and 1M from the Basic rate. */
- /* We do not use other rates. */
- HalSetBrateCfg(adapter, rates, &BrateCfg);
-
- /* 2011.03.30 add by Luke Lee */
- /* CCK 2M ACK should be disabled for some BCM and Atheros AP IOT */
- /* because CCK 2M has poor TXEVM */
- /* CCK 5.5M & 11M ACK should be enabled for better performance */
-
- BrateCfg = (BrateCfg | 0xd) & 0x15d;
-
- BrateCfg |= 0x01; /* default enable 1M ACK rate */
- /* Set RRSR rate table. */
- rtw_write8(adapter, REG_RRSR, BrateCfg & 0xff);
- rtw_write8(adapter, REG_RRSR + 1, (BrateCfg >> 8) & 0xff);
- res = rtw_read8(adapter, REG_RRSR + 2, &reg);
- if (res)
- return;
-
- rtw_write8(adapter, REG_RRSR + 2, reg & 0xf0);
-
- /* Set RTS initial rate */
- while (BrateCfg > 0x1) {
- BrateCfg = (BrateCfg >> 1);
- RateIndex++;
- }
- /* Ziv - Check */
- rtw_write8(adapter, REG_INIRTS_RATE_SEL, RateIndex);
-}
-
-/* Update RRSR and Rate for USERATE */
-void update_tx_basic_rate(struct adapter *padapter, u8 wirelessmode)
-{
- unsigned char supported_rates[NDIS_802_11_LENGTH_RATES_EX];
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- /* Added by Albert 2011/03/22 */
- /* In the P2P mode, the driver should not support the b mode. */
- /* So, the Tx packet shouldn't use the CCK rate */
- if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
- return;
- memset(supported_rates, 0, NDIS_802_11_LENGTH_RATES_EX);
-
- if ((wirelessmode & WIRELESS_11B) && (wirelessmode == WIRELESS_11B))
- memcpy(supported_rates, rtw_basic_rate_cck, 4);
- else if (wirelessmode & WIRELESS_11B)
- memcpy(supported_rates, rtw_basic_rate_mix, 7);
- else
- memcpy(supported_rates, rtw_basic_rate_ofdm, 3);
-
- if (wirelessmode & WIRELESS_11B)
- update_mgnt_tx_rate(padapter, IEEE80211_CCK_RATE_1MB);
- else
- update_mgnt_tx_rate(padapter, IEEE80211_OFDM_RATE_6MB);
-
- rtw_set_basic_rate(padapter, supported_rates);
-}
-
-unsigned char check_assoc_AP(u8 *pframe, uint len)
-{
- unsigned int i;
- struct ndis_802_11_var_ie *pIE;
- u8 epigram_vendor_flag;
- u8 ralink_vendor_flag;
- epigram_vendor_flag = 0;
- ralink_vendor_flag = 0;
-
- for (i = sizeof(struct ndis_802_11_fixed_ie); i < len;) {
- pIE = (struct ndis_802_11_var_ie *)(pframe + i);
-
- switch (pIE->ElementID) {
- case _VENDOR_SPECIFIC_IE_:
- if ((!memcmp(pIE->data, ARTHEROS_OUI1, 3)) ||
- (!memcmp(pIE->data, ARTHEROS_OUI2, 3))) {
- return HT_IOT_PEER_ATHEROS;
- } else if ((!memcmp(pIE->data, BROADCOM_OUI1, 3)) ||
- (!memcmp(pIE->data, BROADCOM_OUI2, 3))) {
- return HT_IOT_PEER_BROADCOM;
- } else if (!memcmp(pIE->data, MARVELL_OUI, 3)) {
- return HT_IOT_PEER_MARVELL;
- } else if (!memcmp(pIE->data, RALINK_OUI, 3)) {
- if (!ralink_vendor_flag) {
- ralink_vendor_flag = 1;
- } else {
- return HT_IOT_PEER_RALINK;
- }
- } else if (!memcmp(pIE->data, CISCO_OUI, 3)) {
- return HT_IOT_PEER_CISCO;
- } else if (!memcmp(pIE->data, REALTEK_OUI, 3)) {
- return HT_IOT_PEER_REALTEK;
- } else if (!memcmp(pIE->data, AIRGOCAP_OUI, 3)) {
- return HT_IOT_PEER_AIRGO;
- } else if (!memcmp(pIE->data, EPIGRAM_OUI, 3)) {
- epigram_vendor_flag = 1;
- if (ralink_vendor_flag)
- return HT_IOT_PEER_TENDA;
- } else {
- break;
- }
- break;
-
- default:
- break;
- }
- i += (pIE->Length + 2);
- }
-
- if (ralink_vendor_flag && !epigram_vendor_flag)
- return HT_IOT_PEER_RALINK;
- else if (ralink_vendor_flag && epigram_vendor_flag)
- return HT_IOT_PEER_TENDA;
- else
- return HT_IOT_PEER_UNKNOWN;
-}
-
-void update_IOT_info(struct adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- switch (pmlmeinfo->assoc_AP_vendor) {
- case HT_IOT_PEER_MARVELL:
- pmlmeinfo->turboMode_cts2self = 1;
- pmlmeinfo->turboMode_rtsen = 0;
- break;
- case HT_IOT_PEER_RALINK:
- pmlmeinfo->turboMode_cts2self = 0;
- pmlmeinfo->turboMode_rtsen = 1;
- break;
- case HT_IOT_PEER_REALTEK:
- /* rtw_write16(padapter, 0x4cc, 0xffff); */
- /* rtw_write16(padapter, 0x546, 0x01c0); */
- break;
- default:
- pmlmeinfo->turboMode_cts2self = 0;
- pmlmeinfo->turboMode_rtsen = 1;
- break;
- }
-}
-
-static void set_ack_preamble(struct adapter *adapter, bool short_preamble)
-{
- struct hal_data_8188e *haldata = &adapter->haldata;
- u8 val8;
-
- /* Joseph marked out for Netgear 3500 TKIP channel 7 issue.(Temporarily) */
- val8 = haldata->nCur40MhzPrimeSC << 5;
- if (short_preamble)
- val8 |= 0x80;
-
- rtw_write8(adapter, REG_RRSR + 2, val8);
-};
-
-static void set_slot_time(struct adapter *adapter, u8 slot_time)
-{
- u8 u1bAIFS, aSifsTime;
- struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- rtw_write8(adapter, REG_SLOT, slot_time);
-
- if (pmlmeinfo->WMM_enable == 0) {
- if (pmlmeext->cur_wireless_mode == WIRELESS_11B)
- aSifsTime = 10;
- else
- aSifsTime = 16;
-
- u1bAIFS = aSifsTime + (2 * pmlmeinfo->slotTime);
-
-		/* <Roger_EXP> Temporarily removed, 2008.06.20. */
- rtw_write8(adapter, REG_EDCA_VO_PARAM, u1bAIFS);
- rtw_write8(adapter, REG_EDCA_VI_PARAM, u1bAIFS);
- rtw_write8(adapter, REG_EDCA_BE_PARAM, u1bAIFS);
- rtw_write8(adapter, REG_EDCA_BK_PARAM, u1bAIFS);
- }
-}
-
-void update_capinfo(struct adapter *Adapter, u16 updateCap)
-{
- struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- /* Check preamble mode, 2005.01.06, by rcnjko. */
- /* Mark to update preamble value forever, 2008.03.18 by lanhsin */
-
- if (updateCap & cShortPreamble) { /* Short Preamble */
- if (pmlmeinfo->preamble_mode != PREAMBLE_SHORT) { /* PREAMBLE_LONG or PREAMBLE_AUTO */
- pmlmeinfo->preamble_mode = PREAMBLE_SHORT;
- set_ack_preamble(Adapter, true);
- }
- } else { /* Long Preamble */
- if (pmlmeinfo->preamble_mode != PREAMBLE_LONG) { /* PREAMBLE_SHORT or PREAMBLE_AUTO */
- pmlmeinfo->preamble_mode = PREAMBLE_LONG;
- set_ack_preamble(Adapter, false);
- }
- }
-
- if (updateCap & cIBSS) {
- /* Filen: See 802.11-2007 p.91 */
- pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME;
- } else { /* Filen: See 802.11-2007 p.90 */
- if (pmlmeext->cur_wireless_mode & (WIRELESS_11G | WIRELESS_11_24N)) {
- if (updateCap & cShortSlotTime) { /* Short Slot Time */
- if (pmlmeinfo->slotTime != SHORT_SLOT_TIME)
- pmlmeinfo->slotTime = SHORT_SLOT_TIME;
- } else { /* Long Slot Time */
- if (pmlmeinfo->slotTime != NON_SHORT_SLOT_TIME)
- pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME;
- }
- } else {
- /* B Mode */
- pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME;
- }
- }
-
- set_slot_time(Adapter, pmlmeinfo->slotTime);
-}
-
-void update_wireless_mode(struct adapter *padapter)
-{
- int ratelen, network_type = 0;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
- unsigned char *rate = cur_network->SupportedRates;
-
- ratelen = rtw_get_rateset_len(cur_network->SupportedRates);
-
- if ((pmlmeinfo->HT_info_enable) && (pmlmeinfo->HT_caps_enable))
- pmlmeinfo->HT_enable = 1;
-
- if (pmlmeext->cur_channel > 14) {
- network_type |= WIRELESS_INVALID;
- } else {
- if (pmlmeinfo->HT_enable)
- network_type = WIRELESS_11_24N;
-
- if (cckratesonly_included(rate, ratelen))
- network_type |= WIRELESS_11B;
- else if (cckrates_included(rate, ratelen))
- network_type |= WIRELESS_11BG;
- else
- network_type |= WIRELESS_11G;
- }
-
- pmlmeext->cur_wireless_mode = network_type & padapter->registrypriv.wireless_mode;
-
- /* RESP_SIFS for CCK */
- rtw_write8(padapter, REG_R2T_SIFS, 0x08);
- rtw_write8(padapter, REG_R2T_SIFS + 1, 0x08);
- /* RESP_SIFS for OFDM */
- rtw_write8(padapter, REG_T2T_SIFS, 0x0a);
- rtw_write8(padapter, REG_T2T_SIFS + 1, 0x0a);
-
- if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
- update_mgnt_tx_rate(padapter, IEEE80211_CCK_RATE_1MB);
- else
- update_mgnt_tx_rate(padapter, IEEE80211_OFDM_RATE_6MB);
-}
-
-void update_bmc_sta_support_rate(struct adapter *padapter, u32 mac_id)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (pmlmeext->cur_wireless_mode & WIRELESS_11B) {
- /* Only B, B/G, and B/G/N AP could use CCK rate */
- memcpy((pmlmeinfo->FW_sta_info[mac_id].SupportedRates), rtw_basic_rate_cck, 4);
- } else {
- memcpy((pmlmeinfo->FW_sta_info[mac_id].SupportedRates), rtw_basic_rate_ofdm, 3);
- }
-}
-
-int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_len, int cam_idx)
-{
- unsigned int ie_len;
- struct ndis_802_11_var_ie *pIE;
- int supportRateNum = 0;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- pIE = (struct ndis_802_11_var_ie *)rtw_get_ie(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
- if (!pIE)
- return _FAIL;
-
- memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len);
- supportRateNum = ie_len;
-
- pIE = (struct ndis_802_11_var_ie *)rtw_get_ie(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
- if (pIE)
- memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len);
-
- return _SUCCESS;
-}
-
-void beacon_timing_control(struct adapter *padapter)
-{
- SetBeaconRelatedRegisters8188EUsb(padapter);
-}
-
-static struct adapter *pbuddy_padapter;
-
-void rtw_handle_dualmac(struct adapter *adapter, bool init)
-{
- if (init) {
- if (!pbuddy_padapter) {
- pbuddy_padapter = adapter;
- } else {
- adapter->pbuddy_adapter = pbuddy_padapter;
- pbuddy_padapter->pbuddy_adapter = adapter;
- /* clear global value */
- pbuddy_padapter = NULL;
- }
- } else {
- pbuddy_padapter = NULL;
- }
-}
diff --git a/drivers/staging/r8188eu/core/rtw_xmit.c b/drivers/staging/r8188eu/core/rtw_xmit.c
deleted file mode 100644
index df88b3e29e77..000000000000
--- a/drivers/staging/r8188eu/core/rtw_xmit.c
+++ /dev/null
@@ -1,2179 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#define _RTW_XMIT_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/wifi.h"
-#include "../include/osdep_intf.h"
-#include "../include/usb_ops.h"
-#include "../include/usb_osintf.h"
-#include "../include/rtl8188e_xmit.h"
-
-static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
-static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
-
-static void _init_txservq(struct tx_servq *ptxservq)
-{
- INIT_LIST_HEAD(&ptxservq->tx_pending);
- INIT_LIST_HEAD(&ptxservq->sta_pending);
- ptxservq->qcnt = 0;
-}
-
-void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
-{
- memset((unsigned char *)psta_xmitpriv, 0, sizeof(struct sta_xmit_priv));
- spin_lock_init(&psta_xmitpriv->lock);
- _init_txservq(&psta_xmitpriv->be_q);
- _init_txservq(&psta_xmitpriv->bk_q);
- _init_txservq(&psta_xmitpriv->vi_q);
- _init_txservq(&psta_xmitpriv->vo_q);
-}
-
-static int rtw_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf,
- u32 alloc_sz)
-{
- pxmitbuf->pallocated_buf = kzalloc(alloc_sz, GFP_KERNEL);
- if (!pxmitbuf->pallocated_buf)
- return -ENOMEM;
-
- pxmitbuf->pbuf = (u8 *)ALIGN((size_t)(pxmitbuf->pallocated_buf), XMITBUF_ALIGN_SZ);
-
- pxmitbuf->pxmit_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!pxmitbuf->pxmit_urb) {
- kfree(pxmitbuf->pallocated_buf);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void rtw_xmit_resource_free(struct adapter *padapter, struct xmit_buf *pxmitbuf,
- u32 free_sz)
-{
- usb_free_urb(pxmitbuf->pxmit_urb);
- kfree(pxmitbuf->pallocated_buf);
-}
-
-int _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
-{
- int i;
- struct xmit_buf *pxmitbuf;
- struct xmit_frame *pxframe;
- u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
- u32 num_xmit_extbuf = NR_XMIT_EXTBUFF;
-
- /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
-
- spin_lock_init(&pxmitpriv->lock);
-
- /*
-	 * Please insert all the queue initialization using rtw_init_queue below
- */
-
- pxmitpriv->adapter = padapter;
-
- INIT_LIST_HEAD(&pxmitpriv->be_pending);
- INIT_LIST_HEAD(&pxmitpriv->bk_pending);
- INIT_LIST_HEAD(&pxmitpriv->vi_pending);
- INIT_LIST_HEAD(&pxmitpriv->vo_pending);
-
- rtw_init_queue(&pxmitpriv->free_xmit_queue);
-
- /*
-	 * Please allocate memory with sz = sizeof(struct xmit_frame) * NR_XMITFRAME,
-	 * and initialize free_xmit_frame below.
-	 * Please also use free_txobj to link up all the xmit_frames...
- */
-
- pxmitpriv->pallocated_frame_buf = vzalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
-
- if (!pxmitpriv->pallocated_frame_buf) {
- pxmitpriv->pxmit_frame_buf = NULL;
- goto exit;
- }
- pxmitpriv->pxmit_frame_buf = (u8 *)ALIGN((size_t)(pxmitpriv->pallocated_frame_buf), 4);
- /* pxmitpriv->pxmit_frame_buf = pxmitpriv->pallocated_frame_buf + 4 - */
- /* ((size_t) (pxmitpriv->pallocated_frame_buf) &3); */
-
- pxframe = (struct xmit_frame *)pxmitpriv->pxmit_frame_buf;
-
- for (i = 0; i < NR_XMITFRAME; i++) {
- INIT_LIST_HEAD(&pxframe->list);
-
- pxframe->padapter = padapter;
- pxframe->frame_tag = NULL_FRAMETAG;
-
- pxframe->pkt = NULL;
-
- pxframe->buf_addr = NULL;
- pxframe->pxmitbuf = NULL;
-
- list_add_tail(&pxframe->list, &pxmitpriv->free_xmit_queue.queue);
-
- pxframe++;
- }
-
- pxmitpriv->free_xmitframe_cnt = NR_XMITFRAME;
-
- pxmitpriv->frag_len = MAX_FRAG_THRESHOLD;
-
- /* init xmit_buf */
- rtw_init_queue(&pxmitpriv->free_xmitbuf_queue);
- rtw_init_queue(&pxmitpriv->pending_xmitbuf_queue);
-
- pxmitpriv->pallocated_xmitbuf = vzalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4);
-
- if (!pxmitpriv->pallocated_xmitbuf)
- goto free_frame_buf;
-
- pxmitpriv->pxmitbuf = (u8 *)ALIGN((size_t)(pxmitpriv->pallocated_xmitbuf), 4);
- /* pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 - */
- /* ((size_t) (pxmitpriv->pallocated_xmitbuf) &3); */
-
- pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
-
- for (i = 0; i < NR_XMITBUFF; i++) {
- INIT_LIST_HEAD(&pxmitbuf->list);
-
- pxmitbuf->priv_data = NULL;
- pxmitbuf->padapter = padapter;
- pxmitbuf->ext_tag = false;
-
- /* Tx buf allocation may fail sometimes, so sleep and retry. */
- if (rtw_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ))) {
- msleep(10);
- if (rtw_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ)))
- goto free_xmitbuf;
- }
-
- pxmitbuf->high_queue = false;
-
- list_add_tail(&pxmitbuf->list, &pxmitpriv->free_xmitbuf_queue.queue);
- pxmitbuf++;
- }
-
- pxmitpriv->free_xmitbuf_cnt = NR_XMITBUFF;
-
- /* Init xmit extension buff */
- rtw_init_queue(&pxmitpriv->free_xmit_extbuf_queue);
-
- pxmitpriv->pallocated_xmit_extbuf = vzalloc(num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
-
- if (!pxmitpriv->pallocated_xmit_extbuf)
- goto free_xmitbuf;
-
- pxmitpriv->pxmit_extbuf = (u8 *)ALIGN((size_t)(pxmitpriv->pallocated_xmit_extbuf), 4);
-
- pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
-
- for (i = 0; i < num_xmit_extbuf; i++) {
- INIT_LIST_HEAD(&pxmitbuf->list);
-
- pxmitbuf->priv_data = NULL;
- pxmitbuf->padapter = padapter;
- pxmitbuf->ext_tag = true;
-
- if (rtw_xmit_resource_alloc(padapter, pxmitbuf, max_xmit_extbuf_size + XMITBUF_ALIGN_SZ))
- goto free_xmit_extbuf;
-
- list_add_tail(&pxmitbuf->list, &pxmitpriv->free_xmit_extbuf_queue.queue);
- pxmitbuf++;
- }
-
- pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
-
- if (rtw_alloc_hwxmits(padapter))
- goto free_xmit_extbuf;
-
- for (i = 0; i < 4; i++)
- pxmitpriv->wmm_para_seq[i] = i;
-
- pxmitpriv->ack_tx = false;
- mutex_init(&pxmitpriv->ack_tx_mutex);
- rtw_sctx_init(&pxmitpriv->ack_tx_ops, 0);
-
- tasklet_init(&pxmitpriv->xmit_tasklet, rtl8188eu_xmit_tasklet, (unsigned long)padapter);
-
- return 0;
-
-free_xmit_extbuf:
- pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
- while (i--) {
- rtw_xmit_resource_free(padapter, pxmitbuf, (max_xmit_extbuf_size + XMITBUF_ALIGN_SZ));
- pxmitbuf++;
- }
- vfree(pxmitpriv->pallocated_xmit_extbuf);
- i = NR_XMITBUFF;
-free_xmitbuf:
- pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
- while (i--) {
- rtw_xmit_resource_free(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
- pxmitbuf++;
- }
- vfree(pxmitpriv->pallocated_xmitbuf);
-free_frame_buf:
- vfree(pxmitpriv->pallocated_frame_buf);
-exit:
- return -ENOMEM;
-}
-
-static void rtw_pkt_complete(struct adapter *padapter, struct sk_buff *pkt)
-{
- u16 queue;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- queue = skb_get_queue_mapping(pkt);
- if (padapter->registrypriv.wifi_spec) {
- if (__netif_subqueue_stopped(padapter->pnetdev, queue) &&
- (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD))
- netif_wake_subqueue(padapter->pnetdev, queue);
- } else {
- if (__netif_subqueue_stopped(padapter->pnetdev, queue))
- netif_wake_subqueue(padapter->pnetdev, queue);
- }
-
- dev_kfree_skb_any(pkt);
-}
-
-void rtw_xmit_complete(struct adapter *padapter, struct xmit_frame *pxframe)
-{
- if (pxframe->pkt)
- rtw_pkt_complete(padapter, pxframe->pkt);
- pxframe->pkt = NULL;
-}
-
-void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
-{
- int i;
- struct adapter *padapter = pxmitpriv->adapter;
- struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitpriv->pxmit_frame_buf;
- struct xmit_buf *pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
- u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
- u32 num_xmit_extbuf = NR_XMIT_EXTBUFF;
-
- if (!pxmitpriv->pxmit_frame_buf)
- return;
-
- for (i = 0; i < NR_XMITFRAME; i++) {
- rtw_xmit_complete(padapter, pxmitframe);
-
- pxmitframe++;
- }
-
- for (i = 0; i < NR_XMITBUFF; i++) {
- rtw_xmit_resource_free(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
- pxmitbuf++;
- }
-
- vfree(pxmitpriv->pallocated_frame_buf);
-
- vfree(pxmitpriv->pallocated_xmitbuf);
-
- pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
- for (i = 0; i < num_xmit_extbuf; i++) {
- rtw_xmit_resource_free(padapter, pxmitbuf, (max_xmit_extbuf_size + XMITBUF_ALIGN_SZ));
- pxmitbuf++;
- }
-
- vfree(pxmitpriv->pallocated_xmit_extbuf);
-
- kfree(pxmitpriv->hwxmits);
-
- mutex_destroy(&pxmitpriv->ack_tx_mutex);
-}
-
-static void update_attrib_vcs_info(struct adapter *padapter, struct xmit_frame *pxmitframe)
-{
- u32 sz;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct sta_info *psta = pattrib->psta;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (pattrib->nr_frags != 1)
- sz = padapter->xmitpriv.frag_len;
- else /* no frag */
- sz = pattrib->last_txcmdsz;
-
- /* (1) RTS_Threshold is compared to the MPDU, not MSDU. */
-	/* (2) If there is more than one frag in this MSDU, only the first frag uses a protection frame. */
-	/* Other fragments are protected by the previous fragment. */
- /* So we only need to check the length of first fragment. */
- if (pmlmeext->cur_wireless_mode < WIRELESS_11_24N || padapter->registrypriv.wifi_spec) {
- if (sz > padapter->registrypriv.rts_thresh) {
- pattrib->vcs_mode = RTS_CTS;
- } else {
- if (psta->rtsen)
- pattrib->vcs_mode = RTS_CTS;
- else if (psta->cts2self)
- pattrib->vcs_mode = CTS_TO_SELF;
- else
- pattrib->vcs_mode = NONE_VCS;
- }
- } else {
- while (true) {
- /* IOT action */
- if ((pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_ATHEROS) && pattrib->ampdu_en &&
- (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)) {
- pattrib->vcs_mode = CTS_TO_SELF;
- break;
- }
-
- /* check ERP protection */
- if (psta->rtsen || psta->cts2self) {
- if (psta->rtsen)
- pattrib->vcs_mode = RTS_CTS;
- else if (psta->cts2self)
- pattrib->vcs_mode = CTS_TO_SELF;
-
- break;
- }
-
- /* check HT op mode */
- if (pattrib->ht_en) {
- u8 htopmode = pmlmeinfo->HT_protection;
-
- if ((pmlmeext->cur_bwmode && (htopmode == 2 || htopmode == 3)) ||
- (!pmlmeext->cur_bwmode && htopmode == 3)) {
- pattrib->vcs_mode = RTS_CTS;
- break;
- }
- }
-
- /* check rts */
- if (sz > padapter->registrypriv.rts_thresh) {
- pattrib->vcs_mode = RTS_CTS;
- break;
- }
-
- /* to do list: check MIMO power save condition. */
-
- /* check AMPDU aggregation for TXOP */
- if (pattrib->ampdu_en) {
- pattrib->vcs_mode = RTS_CTS;
- break;
- }
-
- pattrib->vcs_mode = NONE_VCS;
- break;
- }
- }
-}
-
-static void update_attrib_phy_info(struct pkt_attrib *pattrib, struct sta_info *psta)
-{
- /*if (psta->rtsen)
- pattrib->vcs_mode = RTS_CTS;
- else if (psta->cts2self)
- pattrib->vcs_mode = CTS_TO_SELF;
- else
- pattrib->vcs_mode = NONE_VCS;*/
-
- pattrib->mdata = 0;
- pattrib->eosp = 0;
- pattrib->triggered = 0;
-
-	/* qos_en, ht_en, init rate, bw, ch_offset, sgi */
- pattrib->qos_en = psta->qos_option;
-
- pattrib->raid = psta->raid;
- pattrib->ht_en = psta->htpriv.ht_option;
- pattrib->bwmode = psta->htpriv.bwmode;
- pattrib->ch_offset = psta->htpriv.ch_offset;
- pattrib->sgi = psta->htpriv.sgi;
- pattrib->ampdu_en = false;
- pattrib->retry_ctrl = false;
-}
-
-u8 qos_acm(u8 acm_mask, u8 priority)
-{
- u8 change_priority = priority;
-
- switch (priority) {
- case 0:
- case 3:
- if (acm_mask & BIT(1))
- change_priority = 1;
- break;
- case 1:
- case 2:
- break;
- case 4:
- case 5:
- if (acm_mask & BIT(2))
- change_priority = 0;
- break;
- case 6:
- case 7:
- if (acm_mask & BIT(3))
- change_priority = 5;
- break;
- default:
- break;
- }
-
- return change_priority;
-}
-
-static void rtw_open_pktfile(struct sk_buff *pktptr, struct pkt_file *pfile)
-{
- if (!pktptr) {
- pr_err("8188eu: pktptr is NULL\n");
- return;
- }
- if (!pfile) {
- pr_err("8188eu: pfile is NULL\n");
- return;
- }
- pfile->pkt = pktptr;
- pfile->cur_addr = pktptr->data;
- pfile->buf_start = pktptr->data;
- pfile->pkt_len = pktptr->len;
- pfile->buf_len = pktptr->len;
-
- pfile->cur_buffer = pfile->buf_start;
-}
-
-static uint rtw_remainder_len(struct pkt_file *pfile)
-{
- return pfile->buf_len - ((size_t)(pfile->cur_addr) -
- (size_t)(pfile->buf_start));
-}
-
-static uint rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen)
-{
- uint len;
-
- len = min(rtw_remainder_len(pfile), rlen);
-
- if (rmem)
- skb_copy_bits(pfile->pkt, pfile->buf_len - pfile->pkt_len, rmem, len);
-
- pfile->cur_addr += len;
- pfile->pkt_len -= len;
-
- return len;
-}
-
-static void set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
-{
- struct ethhdr etherhdr;
- struct iphdr ip_hdr;
- s32 user_prio = 0;
-
- rtw_open_pktfile(ppktfile->pkt, ppktfile);
- rtw_pktfile_read(ppktfile, (unsigned char *)&etherhdr, ETH_HLEN);
-
- /* get user_prio from IP hdr */
- if (pattrib->ether_type == 0x0800) {
- rtw_pktfile_read(ppktfile, (u8 *)&ip_hdr, sizeof(ip_hdr));
-/* user_prio = (ntohs(ip_hdr.tos) >> 5) & 0x3; */
- user_prio = ip_hdr.tos >> 5;
- } else if (pattrib->ether_type == 0x888e) {
- /* "When priority processing of data frames is supported, */
- /* a STA's SME should send EAPOL-Key frames at the highest priority." */
- user_prio = 7;
- }
-
- pattrib->priority = user_prio;
- pattrib->hdrlen = WLAN_HDR_A3_QOS_LEN;
- pattrib->subtype = IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA;
-}
-
-static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct pkt_attrib *pattrib)
-{
- struct pkt_file pktfile;
- struct sta_info *psta = NULL;
- struct ethhdr etherhdr;
-
- bool bmcast;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct qos_priv *pqospriv = &pmlmepriv->qospriv;
- int res = _SUCCESS;
-
-
-
- rtw_open_pktfile(pkt, &pktfile);
- rtw_pktfile_read(&pktfile, (u8 *)&etherhdr, ETH_HLEN);
-
- pattrib->ether_type = ntohs(etherhdr.h_proto);
-
- memcpy(pattrib->dst, &etherhdr.h_dest, ETH_ALEN);
- memcpy(pattrib->src, &etherhdr.h_source, ETH_ALEN);
-
- pattrib->pctrl = 0;
-
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
- memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
- memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
- } else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- memcpy(pattrib->ra, get_bssid(pmlmepriv), ETH_ALEN);
- memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
- } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
- memcpy(pattrib->ta, get_bssid(pmlmepriv), ETH_ALEN);
- }
-
- pattrib->pktlen = pktfile.pkt_len;
-
- if (pattrib->ether_type == ETH_P_IP) {
-		/* The following is for DHCP and ARP packets; we use CCK 1M to tx these packets and keep LPS awake for some time */
-		/* to prevent the DHCP exchange from failing */
- u8 tmp[24];
-
- rtw_pktfile_read(&pktfile, &tmp[0], 24);
- pattrib->dhcp_pkt = 0;
-		if (pktfile.pkt_len > 282) { /* MINIMUM_DHCP_PACKET_SIZE */
- if (((tmp[21] == 68) && (tmp[23] == 67)) ||
- ((tmp[21] == 67) && (tmp[23] == 68))) {
- /* 68 : UDP BOOTP client */
- /* 67 : UDP BOOTP server */
- /* Use low rate to send DHCP packet. */
- pattrib->dhcp_pkt = 1;
- }
- }
- }
-
-	/* If EAPOL, ARP, or DHCP packet, driver must be in active mode. */
- if ((pattrib->ether_type == 0x0806) || (pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1))
- rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SPECIAL_PACKET, 1);
-
- bmcast = is_multicast_ether_addr(pattrib->ra);
-
- /* get sta_info */
- if (bmcast) {
- psta = rtw_get_bcmc_stainfo(padapter);
- } else {
- psta = rtw_get_stainfo(pstapriv, pattrib->ra);
-		if (!psta) { /* if we cannot get psta => drop the pkt */
- res = _FAIL;
- goto exit;
- } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) && !(psta->state & _FW_LINKED)) {
- res = _FAIL;
- goto exit;
- }
- }
-
- if (psta) {
- pattrib->mac_id = psta->mac_id;
- pattrib->psta = psta;
- } else {
- /* if we cannot get psta => drop the pkt */
- res = _FAIL;
- goto exit;
- }
-
- pattrib->ack_policy = 0;
- /* get ether_hdr_len */
- pattrib->pkt_hdrlen = ETH_HLEN;/* pattrib->ether_type == 0x8100) ? (14 + 4): 14; vlan tag */
-
- pattrib->hdrlen = WLAN_HDR_A3_LEN;
- pattrib->subtype = IEEE80211_FTYPE_DATA;
- pattrib->priority = 0;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE | WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE)) {
- if (psta->qos_option)
- set_qos(&pktfile, pattrib);
- } else {
- if (pqospriv->qos_option) {
- set_qos(&pktfile, pattrib);
-
- if (pmlmepriv->acm_mask != 0)
- pattrib->priority = qos_acm(pmlmepriv->acm_mask, pattrib->priority);
- }
- }
-
- if (psta->ieee8021x_blocked) {
- pattrib->encrypt = 0;
-
- if ((pattrib->ether_type != 0x888e) && !check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
- res = _FAIL;
- goto exit;
- }
- } else {
- GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, bmcast);
-
- switch (psecuritypriv->dot11AuthAlgrthm) {
- case dot11AuthAlgrthm_Open:
- case dot11AuthAlgrthm_Shared:
- case dot11AuthAlgrthm_Auto:
- pattrib->key_idx = (u8)psecuritypriv->dot11PrivacyKeyIndex;
- break;
- case dot11AuthAlgrthm_8021X:
- if (bmcast)
- pattrib->key_idx = (u8)psecuritypriv->dot118021XGrpKeyid;
- else
- pattrib->key_idx = 0;
- break;
- default:
- pattrib->key_idx = 0;
- break;
- }
- }
-
- switch (pattrib->encrypt) {
- case _WEP40_:
- case _WEP104_:
- pattrib->iv_len = 4;
- pattrib->icv_len = 4;
- break;
- case _TKIP_:
- pattrib->iv_len = 8;
- pattrib->icv_len = 4;
-
- if (padapter->securitypriv.busetkipkey == _FAIL) {
- res = _FAIL;
- goto exit;
- }
- break;
- case _AES_:
- pattrib->iv_len = 8;
- pattrib->icv_len = 8;
- break;
- default:
- pattrib->iv_len = 0;
- pattrib->icv_len = 0;
- break;
- }
-
- if (pattrib->encrypt &&
- (padapter->securitypriv.sw_encrypt || !psecuritypriv->hw_decrypted))
- pattrib->bswenc = true;
- else
- pattrib->bswenc = false;
-
- update_attrib_phy_info(pattrib, psta);
-
-exit:
-
- return res;
-}
-
-static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitframe)
-{
- int curfragnum, length;
- u8 *pframe, *payload, mic[8];
- struct mic_data micdata;
- struct sta_info *stainfo;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- u8 priority[4] = {0x0, 0x0, 0x0, 0x0};
- u8 hw_hdr_offset = 0;
-
- if (pattrib->psta)
- stainfo = pattrib->psta;
- else
- stainfo = rtw_get_stainfo(&padapter->stapriv, &pattrib->ra[0]);
-
- hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
-
- if (pattrib->encrypt == _TKIP_) {/* if (psecuritypriv->dot11PrivacyAlgrthm == _TKIP_PRIVACY_) */
- /* encode mic code */
- if (stainfo) {
- u8 null_key[16] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x0, 0x0};
-
- pframe = pxmitframe->buf_addr + hw_hdr_offset;
-
- if (is_multicast_ether_addr(pattrib->ra)) {
- if (!memcmp(psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey, null_key, 16))
- return _FAIL;
- /* start to calculate the mic code */
- rtw_secmicsetkey(&micdata, psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey);
- } else {
- if (!memcmp(&stainfo->dot11tkiptxmickey.skey[0], null_key, 16)) {
- /* msleep(10); */
- return _FAIL;
- }
- /* start to calculate the mic code */
- rtw_secmicsetkey(&micdata, &stainfo->dot11tkiptxmickey.skey[0]);
- }
-
- if (pframe[1] & 1) { /* ToDS == 1 */
- rtw_secmicappend(&micdata, &pframe[16], 6); /* DA */
- if (pframe[1] & 2) /* From Ds == 1 */
- rtw_secmicappend(&micdata, &pframe[24], 6);
- else
- rtw_secmicappend(&micdata, &pframe[10], 6);
- } else { /* ToDS == 0 */
- rtw_secmicappend(&micdata, &pframe[4], 6); /* DA */
- if (pframe[1] & 2) /* From Ds == 1 */
- rtw_secmicappend(&micdata, &pframe[16], 6);
- else
- rtw_secmicappend(&micdata, &pframe[10], 6);
- }
-
- if (pattrib->qos_en)
- priority[0] = (u8)pxmitframe->attrib.priority;
-
- rtw_secmicappend(&micdata, &priority[0], 4);
-
- payload = pframe;
-
- for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
- payload = PTR_ALIGN(payload, 4);
-
- payload = payload + pattrib->hdrlen + pattrib->iv_len;
- if ((curfragnum + 1) == pattrib->nr_frags) {
- length = pattrib->last_txcmdsz - pattrib->hdrlen - pattrib->iv_len - ((pattrib->bswenc) ? pattrib->icv_len : 0);
- rtw_secmicappend(&micdata, payload, length);
- payload = payload + length;
- } else {
- length = pxmitpriv->frag_len - pattrib->hdrlen - pattrib->iv_len - ((pattrib->bswenc) ? pattrib->icv_len : 0);
- rtw_secmicappend(&micdata, payload, length);
- payload = payload + length + pattrib->icv_len;
- }
- }
- rtw_secgetmic(&micdata, &mic[0]);
- /* add mic code and add the mic code length in last_txcmdsz */
-
- memcpy(payload, &mic[0], 8);
- pattrib->last_txcmdsz += 8;
-
- payload = payload - pattrib->last_txcmdsz + 8;
- }
- }
-
- return _SUCCESS;
-}
-
-static void xmitframe_swencrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
-{
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
-
- if (!pattrib->bswenc)
- return;
-
- switch (pattrib->encrypt) {
- case _WEP40_:
- case _WEP104_:
- rtw_wep_encrypt(padapter, pxmitframe);
- break;
- case _TKIP_:
- rtw_tkip_encrypt(padapter, pxmitframe);
- break;
- case _AES_:
- rtw_aes_encrypt(padapter, pxmitframe);
- break;
- default:
- break;
- }
-}
-
-s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattrib)
-{
- u16 *qc;
-
- struct ieee80211_hdr *pwlanhdr = (struct ieee80211_hdr *)hdr;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct qos_priv *pqospriv = &pmlmepriv->qospriv;
- bool qos_option;
- __le16 *fctrl = &pwlanhdr->frame_control;
-
- struct sta_info *psta;
-
- if (pattrib->psta)
- psta = pattrib->psta;
- else if (is_multicast_ether_addr(pattrib->ra))
- psta = rtw_get_bcmc_stainfo(padapter);
- else
- psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
-
- memset(hdr, 0, WLANHDR_OFFSET);
-
- SetFrameSubType(fctrl, pattrib->subtype);
-
- if (!(pattrib->subtype & IEEE80211_FTYPE_DATA))
- return _SUCCESS;
-
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- /* to_ds = 1, fr_ds = 0; */
- /* Data transfer to AP */
- SetToDs(fctrl);
- memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv), ETH_ALEN);
- memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
- memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN);
- qos_option = pqospriv->qos_option;
- } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- /* to_ds = 0, fr_ds = 1; */
- SetFrDs(fctrl);
- memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
- memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN);
- qos_option = psta->qos_option;
- } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
- memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
- memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
- qos_option = psta->qos_option;
- } else {
- return _FAIL;
- }
-
- if (pattrib->mdata)
- SetMData(fctrl);
-
- if (pattrib->encrypt)
- SetPrivacy(fctrl);
-
- if (qos_option) {
- qc = (unsigned short *)(hdr + pattrib->hdrlen - 2);
-
- if (pattrib->priority)
- SetPriority(qc, pattrib->priority);
-
- SetEOSP(qc, pattrib->eosp);
-
- SetAckpolicy(qc, pattrib->ack_policy);
- }
-
- /* TODO: fill HT Control Field */
-
- /* Update Seq Num will be handled by f/w */
- if (psta) {
- psta->sta_xmitpriv.txseq_tid[pattrib->priority]++;
- psta->sta_xmitpriv.txseq_tid[pattrib->priority] &= 0xFFF;
-
- pattrib->seqnum = psta->sta_xmitpriv.txseq_tid[pattrib->priority];
-
- SetSeqNum(hdr, pattrib->seqnum);
-
- /* check if enable ampdu */
- if (pattrib->ht_en && psta->htpriv.ampdu_enable) {
- if (psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority))
- pattrib->ampdu_en = true;
- }
-
- /* re-check if enable ampdu by BA_starting_seqctrl */
- if (pattrib->ampdu_en) {
- u16 tx_seq;
-
- tx_seq = psta->BA_starting_seqctrl[pattrib->priority & 0x0f];
-
- /* check BA_starting_seqctrl */
- if (SN_LESS(pattrib->seqnum, tx_seq)) {
- pattrib->ampdu_en = false;/* AGG BK */
- } else if (SN_EQUAL(pattrib->seqnum, tx_seq)) {
- psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (tx_seq + 1) & 0xfff;
-
- pattrib->ampdu_en = true;/* AGG EN */
- } else {
- psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (pattrib->seqnum + 1) & 0xfff;
- pattrib->ampdu_en = true;/* AGG EN */
- }
- }
- }
-
- return _SUCCESS;
-}
-
-s32 rtw_txframes_pending(struct adapter *padapter)
-{
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- return (!list_empty(&pxmitpriv->be_pending) ||
- !list_empty(&pxmitpriv->bk_pending) ||
- !list_empty(&pxmitpriv->vi_pending) ||
- !list_empty(&pxmitpriv->vo_pending));
-}
-
-s32 rtw_txframes_sta_ac_pending(struct adapter *padapter, struct pkt_attrib *pattrib)
-{
- struct sta_info *psta;
- struct tx_servq *ptxservq;
- int priority = pattrib->priority;
-
- psta = pattrib->psta;
-
- switch (priority) {
- case 1:
- case 2:
- ptxservq = &psta->sta_xmitpriv.bk_q;
- break;
- case 4:
- case 5:
- ptxservq = &psta->sta_xmitpriv.vi_q;
- break;
- case 6:
- case 7:
- ptxservq = &psta->sta_xmitpriv.vo_q;
- break;
- case 0:
- case 3:
- default:
- ptxservq = &psta->sta_xmitpriv.be_q;
- break;
- }
-
- if (ptxservq)
- return ptxservq->qcnt;
- return 0;
-}
-
-/*
- * This sub-routine will perform all the following:
- *
- * 1. remove 802.3 header.
- * 2. create wlan_header, based on the info in pxmitframe
- * 3. append sta's iv/ext-iv
- * 4. append LLC
- * 5. move frag chunk from pframe to pxmitframe->mem
- * 6. apply sw-encrypt, if necessary.
- */
-s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct xmit_frame *pxmitframe)
-{
- struct pkt_file pktfile;
- s32 frg_inx, frg_len, mpdu_len, llc_sz, mem_sz;
- u8 *pframe, *mem_start;
- u8 hw_hdr_offset;
- struct sta_info *psta;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- u8 *pbuf_start;
- bool bmcst = is_multicast_ether_addr(pattrib->ra);
- s32 res = _SUCCESS;
-
- if (!pkt)
- return _FAIL;
-
- psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
-
- if (!psta)
- return _FAIL;
-
- if (!pxmitframe->buf_addr)
- return _FAIL;
-
- pbuf_start = pxmitframe->buf_addr;
-
- hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
-
- mem_start = pbuf_start + hw_hdr_offset;
-
- if (rtw_make_wlanhdr(padapter, mem_start, pattrib) == _FAIL) {
- res = _FAIL;
- goto exit;
- }
-
- rtw_open_pktfile(pkt, &pktfile);
- rtw_pktfile_read(&pktfile, NULL, pattrib->pkt_hdrlen);
-
- frg_inx = 0;
- frg_len = pxmitpriv->frag_len - 4;/* 2346-4 = 2342 */
-
- while (1) {
- llc_sz = 0;
-
- mpdu_len = frg_len;
-
- pframe = mem_start;
-
- SetMFrag(mem_start);
-
- pframe += pattrib->hdrlen;
- mpdu_len -= pattrib->hdrlen;
-
-		/* add the iv, if necessary... */
- if (pattrib->iv_len) {
- switch (pattrib->encrypt) {
- case _WEP40_:
- case _WEP104_:
- WEP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
- break;
- case _TKIP_:
- if (bmcst)
- TKIP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
- else
- TKIP_IV(pattrib->iv, psta->dot11txpn, 0);
- break;
- case _AES_:
- if (bmcst)
- AES_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
- else
- AES_IV(pattrib->iv, psta->dot11txpn, 0);
- break;
- }
-
- memcpy(pframe, pattrib->iv, pattrib->iv_len);
-
- pframe += pattrib->iv_len;
-
- mpdu_len -= pattrib->iv_len;
- }
-
- if (frg_inx == 0) {
- llc_sz = rtw_put_snap(pframe, pattrib->ether_type);
- pframe += llc_sz;
- mpdu_len -= llc_sz;
- }
-
- if ((pattrib->icv_len > 0) && (pattrib->bswenc))
- mpdu_len -= pattrib->icv_len;
-
- if (bmcst) {
-			/* don't fragment broadcast/multicast packets */
- mem_sz = rtw_pktfile_read(&pktfile, pframe, pattrib->pktlen);
- } else {
- mem_sz = rtw_pktfile_read(&pktfile, pframe, mpdu_len);
- }
-
- pframe += mem_sz;
-
- if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
- memcpy(pframe, pattrib->icv, pattrib->icv_len);
- pframe += pattrib->icv_len;
- }
-
- frg_inx++;
-
- if (bmcst || pktfile.pkt_len == 0) {
- pattrib->nr_frags = frg_inx;
-
- pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->iv_len + ((pattrib->nr_frags == 1) ? llc_sz : 0) +
- ((pattrib->bswenc) ? pattrib->icv_len : 0) + mem_sz;
-
- ClearMFrag(mem_start);
-
- break;
- }
-
- mem_start = PTR_ALIGN(pframe, 4) + hw_hdr_offset;
- memcpy(mem_start, pbuf_start + hw_hdr_offset, pattrib->hdrlen);
- }
-
- if (xmitframe_addmic(padapter, pxmitframe) == _FAIL) {
- res = _FAIL;
- goto exit;
- }
-
- xmitframe_swencrypt(padapter, pxmitframe);
-
- if (!bmcst)
- update_attrib_vcs_info(padapter, pxmitframe);
- else
- pattrib->vcs_mode = NONE_VCS;
-
-exit:
-
- return res;
-}
-
-/* Logical Link Control(LLC) SubNetwork Attachment Point(SNAP) header
- * IEEE LLC/SNAP header contains 8 octets
- * First 3 octets comprise the LLC portion
- * SNAP portion, 5 octets, is divided into two fields:
- * Organizationally Unique Identifier(OUI), 3 octets,
- * type, defined by that organization, 2 octets.
- */
-s32 rtw_put_snap(u8 *data, u16 h_proto)
-{
- struct ieee80211_snap_hdr *snap;
- u8 *oui;
-
- snap = (struct ieee80211_snap_hdr *)data;
- snap->dsap = 0xaa;
- snap->ssap = 0xaa;
- snap->ctrl = 0x03;
-
- if (h_proto == 0x8137 || h_proto == 0x80f3)
- oui = P802_1H_OUI;
- else
- oui = RFC1042_OUI;
-
- snap->oui[0] = oui[0];
- snap->oui[1] = oui[1];
- snap->oui[2] = oui[2];
-
- *(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
-
- return SNAP_SIZE + sizeof(u16);
-}
-
-void rtw_count_tx_stats(struct adapter *padapter, struct xmit_frame *pxmitframe, int sz)
-{
- struct sta_info *psta = NULL;
- struct stainfo_stats *pstats = NULL;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if ((pxmitframe->frame_tag & 0x0f) == DATA_FRAMETAG) {
- pxmitpriv->tx_bytes += sz;
- pmlmepriv->LinkDetectInfo.NumTxOkInPeriod += pxmitframe->agg_num;
-
- psta = pxmitframe->attrib.psta;
- if (psta) {
- pstats = &psta->sta_stats;
- pstats->tx_pkts += pxmitframe->agg_num;
- pstats->tx_bytes += sz;
- }
- }
-}
-
-struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
-{
- struct xmit_buf *pxmitbuf = NULL;
- struct list_head *plist, *phead;
- struct __queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
- unsigned long flags;
-
- spin_lock_irqsave(&pfree_queue->lock, flags);
-
- if (list_empty(&pfree_queue->queue)) {
- pxmitbuf = NULL;
- } else {
- phead = get_list_head(pfree_queue);
-
- plist = phead->next;
-
- pxmitbuf = container_of(plist, struct xmit_buf, list);
-
- list_del_init(&pxmitbuf->list);
- }
-
- if (pxmitbuf) {
- pxmitpriv->free_xmit_extbuf_cnt--;
-
- pxmitbuf->priv_data = NULL;
- /* pxmitbuf->ext_tag = true; */
-
- if (pxmitbuf->sctx)
- rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
- }
-
- spin_unlock_irqrestore(&pfree_queue->lock, flags);
-
- return pxmitbuf;
-}
-
-s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
-{
- struct __queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
- unsigned long flags;
-
- if (!pxmitbuf)
- return _FAIL;
-
- spin_lock_irqsave(&pfree_queue->lock, flags);
-
- list_del_init(&pxmitbuf->list);
-
- list_add_tail(&pxmitbuf->list, get_list_head(pfree_queue));
- pxmitpriv->free_xmit_extbuf_cnt++;
-
- spin_unlock_irqrestore(&pfree_queue->lock, flags);
-
- return _SUCCESS;
-}
-
-struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
-{
- struct xmit_buf *pxmitbuf = NULL;
- struct list_head *plist, *phead;
- struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
- unsigned long flags;
-
- spin_lock_irqsave(&pfree_xmitbuf_queue->lock, flags);
-
- if (list_empty(&pfree_xmitbuf_queue->queue)) {
- pxmitbuf = NULL;
- } else {
- phead = get_list_head(pfree_xmitbuf_queue);
-
- plist = phead->next;
-
- pxmitbuf = container_of(plist, struct xmit_buf, list);
-
- list_del_init(&pxmitbuf->list);
- }
-
- if (pxmitbuf) {
- pxmitpriv->free_xmitbuf_cnt--;
- pxmitbuf->priv_data = NULL;
- if (pxmitbuf->sctx)
- rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
- }
- spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, flags);
-
- return pxmitbuf;
-}
-
-s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
-{
- struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
- unsigned long flags;
-
- if (!pxmitbuf)
- return _FAIL;
-
- if (pxmitbuf->sctx)
- rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_FREE);
-
- if (pxmitbuf->ext_tag) {
- rtw_free_xmitbuf_ext(pxmitpriv, pxmitbuf);
- } else {
- spin_lock_irqsave(&pfree_xmitbuf_queue->lock, flags);
-
- list_del_init(&pxmitbuf->list);
-
- list_add_tail(&pxmitbuf->list, get_list_head(pfree_xmitbuf_queue));
-
- pxmitpriv->free_xmitbuf_cnt++;
- spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, flags);
- }
-
- return _SUCCESS;
-}
-
-/*
- * Calling context:
- * 1. OS_TXENTRY
- * 2. RXENTRY (rx_thread or RX_ISR/RX_CallBack)
- *
- * If we turn on USE_RXTHREAD, then no critical section is needed.
- * Otherwise, we must use _enter/_exit critical to protect free_xmit_queue...
- *
- * Must be very very cautious...
- */
-struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)/* _queue *pfree_xmit_queue) */
-{
- /*
- * Please remember to use all the osdep_service api,
- * and lock/unlock or _enter/_exit critical to protect
- * pfree_xmit_queue
- */
-
- struct xmit_frame *pxframe = NULL;
- struct list_head *plist, *phead;
- struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
-
- spin_lock_bh(&pfree_xmit_queue->lock);
-
- if (list_empty(&pfree_xmit_queue->queue))
- goto out;
-
- phead = get_list_head(pfree_xmit_queue);
- plist = phead->next;
- pxframe = container_of(plist, struct xmit_frame, list);
- list_del_init(&pxframe->list);
-
- pxmitpriv->free_xmitframe_cnt--;
-
- pxframe->buf_addr = NULL;
- pxframe->pxmitbuf = NULL;
-
- memset(&pxframe->attrib, 0, sizeof(struct pkt_attrib));
- /* pxframe->attrib.psta = NULL; */
-
- pxframe->frame_tag = DATA_FRAMETAG;
-
- pxframe->pkt = NULL;
- pxframe->pkt_offset = 1;/* default use pkt_offset to fill tx desc */
-
- pxframe->agg_num = 1;
- pxframe->ack_report = 0;
-
-out:
- spin_unlock_bh(&pfree_xmit_queue->lock);
- return pxframe;
-}
-
-s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitframe)
-{
- struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
- struct adapter *padapter = pxmitpriv->adapter;
- struct sk_buff *pndis_pkt = NULL;
-
- if (!pxmitframe)
- goto exit;
-
- spin_lock_bh(&pfree_xmit_queue->lock);
-
- list_del_init(&pxmitframe->list);
-
- if (pxmitframe->pkt) {
- pndis_pkt = pxmitframe->pkt;
- pxmitframe->pkt = NULL;
- }
-
- list_add_tail(&pxmitframe->list, get_list_head(pfree_xmit_queue));
-
- pxmitpriv->free_xmitframe_cnt++;
-
- spin_unlock_bh(&pfree_xmit_queue->lock);
-
- if (pndis_pkt)
- rtw_pkt_complete(padapter, pndis_pkt);
-
-exit:
-
- return _SUCCESS;
-}
-
-void rtw_free_xmitframe_list(struct xmit_priv *pxmitpriv, struct list_head *xframe_list)
-{
- struct xmit_frame *pxmitframe, *tmp_xmitframe;
-
- list_for_each_entry_safe(pxmitframe, tmp_xmitframe, xframe_list, list)
- rtw_free_xmitframe(pxmitpriv, pxmitframe);
-}
-
-struct xmit_frame *rtw_dequeue_xframe(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit_i)
-{
- struct hw_xmit *phwxmit;
- struct tx_servq *ptxservq, *tmp_txservq;
- struct list_head *xframe_list;
- struct xmit_frame *pxmitframe = NULL;
- struct adapter *padapter = pxmitpriv->adapter;
- struct registry_priv *pregpriv = &padapter->registrypriv;
- int i, inx[] = { 0, 1, 2, 3 };
-
- if (pregpriv->wifi_spec == 1) {
- for (i = 0; i < ARRAY_SIZE(inx); i++)
- inx[i] = pxmitpriv->wmm_para_seq[i];
- }
-
- spin_lock_bh(&pxmitpriv->lock);
-
- for (i = 0; i < HWXMIT_ENTRY; i++) {
- phwxmit = phwxmit_i + inx[i];
- list_for_each_entry_safe(ptxservq, tmp_txservq, phwxmit->sta_list, tx_pending) {
- xframe_list = &ptxservq->sta_pending;
- if (list_empty(xframe_list))
- continue;
-
- pxmitframe = container_of(xframe_list->next, struct xmit_frame, list);
- list_del_init(&pxmitframe->list);
-
- phwxmit->accnt--;
- ptxservq->qcnt--;
-
- /* Remove sta node when there are no pending packets. */
- if (list_empty(xframe_list))
- list_del_init(&ptxservq->tx_pending);
-
- goto exit;
- }
- }
-exit:
- spin_unlock_bh(&pxmitpriv->lock);
-
- return pxmitframe;
-}
-
-struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *psta, int up, u8 *ac)
-{
- struct tx_servq *ptxservq;
-
- switch (up) {
- case 1:
- case 2:
- ptxservq = &psta->sta_xmitpriv.bk_q;
- *(ac) = 3;
- break;
- case 4:
- case 5:
- ptxservq = &psta->sta_xmitpriv.vi_q;
- *(ac) = 1;
- break;
- case 6:
- case 7:
- ptxservq = &psta->sta_xmitpriv.vo_q;
- *(ac) = 0;
- break;
- case 0:
- case 3:
- default:
- ptxservq = &psta->sta_xmitpriv.be_q;
- *(ac) = 2;
- break;
- }
-
- return ptxservq;
-}
-
-/*
- * Will enqueue pxmitframe to the proper queue
- * and indicate it to the xx_pending list.
- */
-s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
-{
- u8 ac_index;
- struct sta_info *psta;
- struct tx_servq *ptxservq;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
- int res = _SUCCESS;
-
- if (pattrib->psta)
- psta = pattrib->psta;
- else
- psta = rtw_get_stainfo(pstapriv, pattrib->ra);
-
- if (!psta) {
- res = _FAIL;
- goto exit;
- }
-
- ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
-
- if (list_empty(&ptxservq->tx_pending))
- list_add_tail(&ptxservq->tx_pending, phwxmits[ac_index].sta_list);
-
- list_add_tail(&pxmitframe->list, &ptxservq->sta_pending);
- ptxservq->qcnt++;
- phwxmits[ac_index].accnt++;
-exit:
-
- return res;
-}
-
-int rtw_alloc_hwxmits(struct adapter *padapter)
-{
- struct hw_xmit *hwxmits;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- pxmitpriv->hwxmits = kcalloc(HWXMIT_ENTRY, sizeof(struct hw_xmit), GFP_KERNEL);
- if (!pxmitpriv->hwxmits)
- return -ENOMEM;
-
- hwxmits = pxmitpriv->hwxmits;
-
- hwxmits[0].sta_list = &pxmitpriv->vo_pending;
- hwxmits[1].sta_list = &pxmitpriv->vi_pending;
- hwxmits[2].sta_list = &pxmitpriv->be_pending;
- hwxmits[3].sta_list = &pxmitpriv->bk_pending;
-
- return 0;
-}
-
-static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb)
-{
- struct sk_buff *skb = *pskb;
- int res, is_vlan_tag = 0, i, do_nat25 = 1;
- unsigned short vlan_hdr = 0;
- void *br_port = NULL;
-
- rcu_read_lock();
- br_port = rcu_dereference(padapter->pnetdev->rx_handler_data);
- rcu_read_unlock();
- spin_lock_bh(&padapter->br_ext_lock);
- if (!(skb->data[0] & 1) && br_port &&
- memcmp(skb->data + ETH_ALEN, padapter->br_mac, ETH_ALEN) &&
- *((__be16 *)(skb->data + ETH_ALEN * 2)) != htons(ETH_P_8021Q) &&
- *((__be16 *)(skb->data + ETH_ALEN * 2)) == htons(ETH_P_IP) &&
- !memcmp(padapter->scdb_mac, skb->data + ETH_ALEN, ETH_ALEN) && padapter->scdb_entry) {
- memcpy(skb->data + ETH_ALEN, GET_MY_HWADDR(padapter), ETH_ALEN);
- padapter->scdb_entry->ageing_timer = jiffies;
- spin_unlock_bh(&padapter->br_ext_lock);
- } else {
- if (*((__be16 *)(skb->data + ETH_ALEN * 2)) == htons(ETH_P_8021Q)) {
- is_vlan_tag = 1;
- vlan_hdr = *((unsigned short *)(skb->data + ETH_ALEN * 2 + 2));
- for (i = 0; i < 6; i++)
- *((unsigned short *)(skb->data + ETH_ALEN * 2 + 2 - i * 2)) = *((unsigned short *)(skb->data + ETH_ALEN * 2 - 2 - i * 2));
- skb_pull(skb, 4);
- }
- if (!memcmp(skb->data + ETH_ALEN, padapter->br_mac, ETH_ALEN) &&
- (*((__be16 *)(skb->data + ETH_ALEN * 2)) == htons(ETH_P_IP)))
- memcpy(padapter->br_ip, skb->data + WLAN_ETHHDR_LEN + 12, 4);
-
- if (*((__be16 *)(skb->data + ETH_ALEN * 2)) == htons(ETH_P_IP)) {
- if (memcmp(padapter->scdb_mac, skb->data + ETH_ALEN, ETH_ALEN)) {
- padapter->scdb_entry = (struct nat25_network_db_entry *)scdb_findEntry(padapter,
- skb->data + WLAN_ETHHDR_LEN + 12);
- if (padapter->scdb_entry) {
- memcpy(padapter->scdb_mac, skb->data + ETH_ALEN, ETH_ALEN);
- memcpy(padapter->scdb_ip, skb->data + WLAN_ETHHDR_LEN + 12, 4);
- padapter->scdb_entry->ageing_timer = jiffies;
- do_nat25 = 0;
- }
- } else {
- if (padapter->scdb_entry) {
- padapter->scdb_entry->ageing_timer = jiffies;
- do_nat25 = 0;
- } else {
- memset(padapter->scdb_mac, 0, ETH_ALEN);
- memset(padapter->scdb_ip, 0, 4);
- }
- }
- }
- spin_unlock_bh(&padapter->br_ext_lock);
- if (do_nat25) {
- if (nat25_db_handle(padapter, skb, NAT25_CHECK) == 0) {
- struct sk_buff *newskb;
-
- if (is_vlan_tag) {
- skb_push(skb, 4);
- for (i = 0; i < 6; i++)
- *((unsigned short *)(skb->data + i * 2)) = *((unsigned short *)(skb->data + 4 + i * 2));
- *((__be16 *)(skb->data + ETH_ALEN * 2)) = htons(ETH_P_8021Q);
- *((unsigned short *)(skb->data + ETH_ALEN * 2 + 2)) = vlan_hdr;
- }
-
- newskb = skb_copy(skb, GFP_ATOMIC);
- if (!newskb)
- return -1;
- dev_kfree_skb_any(skb);
-
- *pskb = skb = newskb;
- if (is_vlan_tag) {
- vlan_hdr = *((unsigned short *)(skb->data + ETH_ALEN * 2 + 2));
- for (i = 0; i < 6; i++)
- *((unsigned short *)(skb->data + ETH_ALEN * 2 + 2 - i * 2)) = *((unsigned short *)(skb->data + ETH_ALEN * 2 - 2 - i * 2));
- skb_pull(skb, 4);
- }
- }
-
- res = skb_linearize(skb);
- if (res < 0)
- return -1;
-
- res = nat25_db_handle(padapter, skb, NAT25_INSERT);
- if (res < 0) {
- if (res == -2)
- return -1;
-
- return 0;
- }
- }
-
- memcpy(skb->data + ETH_ALEN, GET_MY_HWADDR(padapter), ETH_ALEN);
-
- dhcp_flag_bcast(padapter, skb);
-
- if (is_vlan_tag) {
- skb_push(skb, 4);
- for (i = 0; i < 6; i++)
- *((unsigned short *)(skb->data + i * 2)) = *((unsigned short *)(skb->data + 4 + i * 2));
- *((__be16 *)(skb->data + ETH_ALEN * 2)) = htons(ETH_P_8021Q);
- *((unsigned short *)(skb->data + ETH_ALEN * 2 + 2)) = vlan_hdr;
- }
- }
-
- /* check if SA is equal to our MAC */
- if (memcmp(skb->data + ETH_ALEN, GET_MY_HWADDR(padapter), ETH_ALEN))
- return -1;
-
- return 0;
-}
-
-u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe)
-{
- u32 addr;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
-
- switch (pattrib->qsel) {
- case 0:
- case 3:
- addr = BE_QUEUE_INX;
- break;
- case 1:
- case 2:
- addr = BK_QUEUE_INX;
- break;
- case 4:
- case 5:
- addr = VI_QUEUE_INX;
- break;
- case 6:
- case 7:
- addr = VO_QUEUE_INX;
- break;
- case 0x10:
- addr = BCN_QUEUE_INX;
- break;
- case 0x11:/* BC/MC in PS (HIQ) */
- addr = HIGH_QUEUE_INX;
- break;
- case 0x12:
- default:
- addr = MGT_QUEUE_INX;
- break;
- }
-
- return addr;
-}
-
-/*
- * The main transmit(tx) entry
- *
- * Return
- * 1 enqueue
- * 0 success, hardware will handle this xmit frame(packet)
- * <0 fail
- */
-s32 rtw_xmit(struct adapter *padapter, struct sk_buff **ppkt)
-{
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct xmit_frame *pxmitframe = NULL;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- s32 res;
-
- pxmitframe = rtw_alloc_xmitframe(pxmitpriv);
- if (!pxmitframe)
- return -1;
-
- if (rcu_access_pointer(padapter->pnetdev->rx_handler_data) &&
- check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_ADHOC_STATE)) {
- res = rtw_br_client_tx(padapter, ppkt);
- if (res == -1) {
- rtw_free_xmitframe(pxmitpriv, pxmitframe);
- return -1;
- }
- }
-
- res = update_attrib(padapter, *ppkt, &pxmitframe->attrib);
-
- if (res == _FAIL) {
- rtw_free_xmitframe(pxmitpriv, pxmitframe);
- return -1;
- }
- pxmitframe->pkt = *ppkt;
-
- rtw_led_control(padapter, LED_CTL_TX);
-
- pxmitframe->attrib.qsel = pxmitframe->attrib.priority;
-
- spin_lock_bh(&pxmitpriv->lock);
- if (xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe)) {
- spin_unlock_bh(&pxmitpriv->lock);
- return 1;
- }
- spin_unlock_bh(&pxmitpriv->lock);
-
- if (!rtl8188eu_hal_xmit(padapter, pxmitframe))
- return 1;
-
- return 0;
-}
-
-int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_frame *pxmitframe)
-{
- int ret = false;
- struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- bool bmcst = is_multicast_ether_addr(pattrib->ra);
-
- if (!check_fwstate(pmlmepriv, WIFI_AP_STATE))
- return ret;
-
- if (pattrib->psta)
- psta = pattrib->psta;
- else
- psta = rtw_get_stainfo(pstapriv, pattrib->ra);
-
- if (!psta)
- return ret;
-
- if (pattrib->triggered == 1) {
- if (bmcst)
- pattrib->qsel = 0x11;/* HIQ */
- return ret;
- }
-
- if (bmcst) {
- spin_lock_bh(&psta->sleep_q.lock);
-
- if (pstapriv->sta_dz_bitmap) {/* if any one sta is in ps mode */
- list_del_init(&pxmitframe->list);
-
- list_add_tail(&pxmitframe->list, get_list_head(&psta->sleep_q));
-
- psta->sleepq_len++;
-
- pstapriv->tim_bitmap |= BIT(0);/* */
- pstapriv->sta_dz_bitmap |= BIT(0);
- /* tx bc/mc packets after update bcn */
- update_beacon(padapter, _TIM_IE_, NULL, false);
-
- ret = true;
- }
-
- spin_unlock_bh(&psta->sleep_q.lock);
-
- return ret;
- }
-
- spin_lock_bh(&psta->sleep_q.lock);
-
- if (psta->state & WIFI_SLEEP_STATE) {
- u8 wmmps_ac = 0;
-
- if (pstapriv->sta_dz_bitmap & BIT(psta->aid)) {
- list_del_init(&pxmitframe->list);
-
- list_add_tail(&pxmitframe->list, get_list_head(&psta->sleep_q));
-
- psta->sleepq_len++;
-
- switch (pattrib->priority) {
- case 1:
- case 2:
- wmmps_ac = psta->uapsd_bk & BIT(0);
- break;
- case 4:
- case 5:
- wmmps_ac = psta->uapsd_vi & BIT(0);
- break;
- case 6:
- case 7:
- wmmps_ac = psta->uapsd_vo & BIT(0);
- break;
- case 0:
- case 3:
- default:
- wmmps_ac = psta->uapsd_be & BIT(0);
- break;
- }
-
- if (wmmps_ac)
- psta->sleepq_ac_len++;
-
- if (((psta->has_legacy_ac) && (!wmmps_ac)) ||
- ((!psta->has_legacy_ac) && (wmmps_ac))) {
- pstapriv->tim_bitmap |= BIT(psta->aid);
-
- if (psta->sleepq_len == 1) {
- /* update BCN for TIM IE */
- update_beacon(padapter, _TIM_IE_, NULL, false);
- }
- }
- ret = true;
- }
- }
-
- spin_unlock_bh(&psta->sleep_q.lock);
-
- return ret;
-}
-
-static void dequeue_xmitframes_to_sleeping_queue(struct adapter *padapter, struct sta_info *psta, struct list_head *phead)
-{
- struct list_head *plist;
- u8 ac_index;
- struct tx_servq *ptxservq;
- struct pkt_attrib *pattrib;
- struct xmit_frame *pxmitframe;
- struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
-
- plist = phead->next;
-
- while (phead != plist) {
- pxmitframe = container_of(plist, struct xmit_frame, list);
-
- plist = plist->next;
-
- xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe);
-
- pattrib = &pxmitframe->attrib;
-
- ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
-
- ptxservq->qcnt--;
- phwxmits[ac_index].accnt--;
- }
-}
-
-void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta)
-{
- struct sta_info *psta_bmc;
- struct sta_xmit_priv *pstaxmitpriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- pstaxmitpriv = &psta->sta_xmitpriv;
-
- /* for BC/MC Frames */
- psta_bmc = rtw_get_bcmc_stainfo(padapter);
-
- spin_lock_bh(&pxmitpriv->lock);
-
- psta->state |= WIFI_SLEEP_STATE;
-
- pstapriv->sta_dz_bitmap |= BIT(psta->aid);
-
- dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vo_q.sta_pending);
- list_del_init(&pstaxmitpriv->vo_q.tx_pending);
-
- dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vi_q.sta_pending);
- list_del_init(&pstaxmitpriv->vi_q.tx_pending);
-
- dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->be_q.sta_pending);
- list_del_init(&pstaxmitpriv->be_q.tx_pending);
-
- dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->bk_q.sta_pending);
- list_del_init(&pstaxmitpriv->bk_q.tx_pending);
-
- /* for BC/MC Frames */
- pstaxmitpriv = &psta_bmc->sta_xmitpriv;
- dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc, &pstaxmitpriv->be_q.sta_pending);
- list_del_init(&pstaxmitpriv->be_q.tx_pending);
-
- spin_unlock_bh(&pxmitpriv->lock);
-}
-
-void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
-{
- u8 update_mask = 0, wmmps_ac = 0;
- struct sta_info *psta_bmc;
- struct list_head *xmitframe_plist, *xmitframe_phead;
- struct xmit_frame *pxmitframe = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- spin_lock_bh(&psta->sleep_q.lock);
-
- xmitframe_phead = get_list_head(&psta->sleep_q);
- xmitframe_plist = xmitframe_phead->next;
-
- while (xmitframe_phead != xmitframe_plist) {
- pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
-
- xmitframe_plist = xmitframe_plist->next;
-
- list_del_init(&pxmitframe->list);
-
- switch (pxmitframe->attrib.priority) {
- case 1:
- case 2:
- wmmps_ac = psta->uapsd_bk & BIT(1);
- break;
- case 4:
- case 5:
- wmmps_ac = psta->uapsd_vi & BIT(1);
- break;
- case 6:
- case 7:
- wmmps_ac = psta->uapsd_vo & BIT(1);
- break;
- case 0:
- case 3:
- default:
- wmmps_ac = psta->uapsd_be & BIT(1);
- break;
- }
-
- psta->sleepq_len--;
- if (psta->sleepq_len > 0)
- pxmitframe->attrib.mdata = 1;
- else
- pxmitframe->attrib.mdata = 0;
-
- if (wmmps_ac) {
- psta->sleepq_ac_len--;
- if (psta->sleepq_ac_len > 0) {
- pxmitframe->attrib.mdata = 1;
- pxmitframe->attrib.eosp = 0;
- } else {
- pxmitframe->attrib.mdata = 0;
- pxmitframe->attrib.eosp = 1;
- }
- }
-
- pxmitframe->attrib.triggered = 1;
-
- spin_unlock_bh(&psta->sleep_q.lock);
- if (rtl8188eu_hal_xmit(padapter, pxmitframe))
- rtw_xmit_complete(padapter, pxmitframe);
- spin_lock_bh(&psta->sleep_q.lock);
- }
-
- if (psta->sleepq_len == 0) {
- pstapriv->tim_bitmap &= ~BIT(psta->aid);
-
- update_mask = BIT(0);
-
- if (psta->state & WIFI_SLEEP_STATE)
- psta->state ^= WIFI_SLEEP_STATE;
-
- if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
- psta->expire_to = pstapriv->expire_to;
- psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
- }
-
- pstapriv->sta_dz_bitmap &= ~BIT(psta->aid);
- }
-
- spin_unlock_bh(&psta->sleep_q.lock);
-
- /* for BC/MC Frames */
- psta_bmc = rtw_get_bcmc_stainfo(padapter);
- if (!psta_bmc)
- return;
-
-	if ((pstapriv->sta_dz_bitmap & 0xfffe) == 0x0) { /* no STA is in PS mode */
- spin_lock_bh(&psta_bmc->sleep_q.lock);
-
- xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
- xmitframe_plist = xmitframe_phead->next;
-
- while (xmitframe_phead != xmitframe_plist) {
- pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
-
- xmitframe_plist = xmitframe_plist->next;
-
- list_del_init(&pxmitframe->list);
-
- psta_bmc->sleepq_len--;
- if (psta_bmc->sleepq_len > 0)
- pxmitframe->attrib.mdata = 1;
- else
- pxmitframe->attrib.mdata = 0;
-
- pxmitframe->attrib.triggered = 1;
-
- spin_unlock_bh(&psta_bmc->sleep_q.lock);
- if (rtl8188eu_hal_xmit(padapter, pxmitframe))
- rtw_xmit_complete(padapter, pxmitframe);
- spin_lock_bh(&psta_bmc->sleep_q.lock);
- }
-
- if (psta_bmc->sleepq_len == 0) {
- pstapriv->tim_bitmap &= ~BIT(0);
- pstapriv->sta_dz_bitmap &= ~BIT(0);
-
- update_mask |= BIT(1);
- }
-
- spin_unlock_bh(&psta_bmc->sleep_q.lock);
- }
-
- if (update_mask)
- update_beacon(padapter, _TIM_IE_, NULL, false);
-}
-
-void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *psta)
-{
- u8 wmmps_ac = 0;
- struct list_head *xmitframe_plist, *xmitframe_phead;
- struct xmit_frame *pxmitframe = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- spin_lock_bh(&psta->sleep_q.lock);
-
- xmitframe_phead = get_list_head(&psta->sleep_q);
- xmitframe_plist = xmitframe_phead->next;
-
- while (xmitframe_phead != xmitframe_plist) {
- pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
-
- xmitframe_plist = xmitframe_plist->next;
-
- switch (pxmitframe->attrib.priority) {
- case 1:
- case 2:
- wmmps_ac = psta->uapsd_bk & BIT(1);
- break;
- case 4:
- case 5:
- wmmps_ac = psta->uapsd_vi & BIT(1);
- break;
- case 6:
- case 7:
- wmmps_ac = psta->uapsd_vo & BIT(1);
- break;
- case 0:
- case 3:
- default:
- wmmps_ac = psta->uapsd_be & BIT(1);
- break;
- }
-
- if (!wmmps_ac)
- continue;
-
- list_del_init(&pxmitframe->list);
-
- psta->sleepq_len--;
- psta->sleepq_ac_len--;
-
- if (psta->sleepq_ac_len > 0) {
- pxmitframe->attrib.mdata = 1;
- pxmitframe->attrib.eosp = 0;
- } else {
- pxmitframe->attrib.mdata = 0;
- pxmitframe->attrib.eosp = 1;
- }
-
- pxmitframe->attrib.triggered = 1;
-
- if (rtl8188eu_hal_xmit(padapter, pxmitframe))
- rtw_xmit_complete(padapter, pxmitframe);
-
- if ((psta->sleepq_ac_len == 0) && (!psta->has_legacy_ac) && (wmmps_ac)) {
- pstapriv->tim_bitmap &= ~BIT(psta->aid);
-
- /* update BCN for TIM IE */
- update_beacon(padapter, _TIM_IE_, NULL, false);
- }
- }
-
- spin_unlock_bh(&psta->sleep_q.lock);
-}
-
-void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms)
-{
- sctx->timeout_ms = timeout_ms;
- sctx->submit_time = jiffies;
- init_completion(&sctx->done);
- sctx->status = RTW_SCTX_SUBMITTED;
-}
-
-int rtw_sctx_wait(struct submit_ctx *sctx)
-{
- int ret = _FAIL;
- unsigned long expire;
- int status = 0;
-
- expire = sctx->timeout_ms ? msecs_to_jiffies(sctx->timeout_ms) : MAX_SCHEDULE_TIMEOUT;
- if (!wait_for_completion_timeout(&sctx->done, expire))
- /* timeout, do something?? */
- status = RTW_SCTX_DONE_TIMEOUT;
- else
- status = sctx->status;
-
- if (status == RTW_SCTX_DONE_SUCCESS)
- ret = _SUCCESS;
-
- return ret;
-}
-
-void rtw_sctx_done_err(struct submit_ctx **sctx, int status)
-{
- if (*sctx) {
- (*sctx)->status = status;
- complete(&((*sctx)->done));
- *sctx = NULL;
- }
-}
-
-int rtw_ack_tx_wait(struct xmit_priv *pxmitpriv, u32 timeout_ms)
-{
- struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;
-
- pack_tx_ops->submit_time = jiffies;
- pack_tx_ops->timeout_ms = timeout_ms;
- pack_tx_ops->status = RTW_SCTX_SUBMITTED;
-
- return rtw_sctx_wait(pack_tx_ops);
-}
-
-void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status)
-{
- struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;
-
- if (pxmitpriv->ack_tx)
- rtw_sctx_done_err(&pack_tx_ops, status);
-}
-
-static void rtw_check_xmit_resource(struct adapter *padapter, struct sk_buff *pkt)
-{
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- u16 queue;
-
- queue = skb_get_queue_mapping(pkt);
- if (padapter->registrypriv.wifi_spec) {
- /* No free space for Tx, tx_worker is too slow */
- if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD)
- netif_stop_subqueue(padapter->pnetdev, queue);
- } else {
- if (pxmitpriv->free_xmitframe_cnt <= 4) {
- if (!netif_tx_queue_stopped(netdev_get_tx_queue(padapter->pnetdev, queue)))
- netif_stop_subqueue(padapter->pnetdev, queue);
- }
- }
-}
-
-static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
-{
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct list_head *phead, *plist;
- struct sk_buff *newskb;
- struct sta_info *psta = NULL;
- s32 res;
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- phead = &pstapriv->asoc_list;
- plist = phead->next;
-
- /* free sta asoc_queue */
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, asoc_list);
-
- plist = plist->next;
-
-		/* don't send the frame back to the STA it came from */
- if (!memcmp(psta->hwaddr, &skb->data[6], 6))
- continue;
-
- newskb = skb_copy(skb, GFP_ATOMIC);
-
- if (newskb) {
- memcpy(newskb->data, psta->hwaddr, 6);
- res = rtw_xmit(padapter, &newskb);
- if (res < 0) {
- pxmitpriv->tx_drop++;
- dev_kfree_skb_any(newskb);
- } else {
- pxmitpriv->tx_pkts++;
- }
- } else {
- pxmitpriv->tx_drop++;
-
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-			return false;	/* Caller shall transmit this multicast frame via the normal path. */
- }
- }
-
- spin_unlock_bh(&pstapriv->asoc_list_lock);
- dev_kfree_skb_any(skb);
- return true;
-}
-
-netdev_tx_t rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- s32 res = 0;
-
- if (!rtw_if_up(padapter))
- goto drop_packet;
-
- rtw_check_xmit_resource(padapter, pkt);
-
- if (!rtw_mc2u_disable && check_fwstate(pmlmepriv, WIFI_AP_STATE) &&
- (IP_MCAST_MAC(pkt->data) || ICMPV6_MCAST_MAC(pkt->data)) &&
- (padapter->registrypriv.wifi_spec == 0)) {
- if (pxmitpriv->free_xmitframe_cnt > (NR_XMITFRAME / 4)) {
- res = rtw_mlcst2unicst(padapter, pkt);
- if (res)
- goto exit;
- }
- }
-
- res = rtw_xmit(padapter, &pkt);
- if (res < 0)
- goto drop_packet;
-
- pxmitpriv->tx_pkts++;
- goto exit;
-
-drop_packet:
- pxmitpriv->tx_drop++;
- dev_kfree_skb_any(pkt);
-
-exit:
- return NETDEV_TX_OK;
-}
diff --git a/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c b/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c
deleted file mode 100644
index 1e04de3a6622..000000000000
--- a/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c
+++ /dev/null
@@ -1,654 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) Realtek Semiconductor Corp. */
-
-#include "../include/drv_types.h"
-
-static u8 RETRY_PENALTY[PERENTRY][RETRYSIZE + 1] = {
- {5, 4, 3, 2, 0, 3}, /* 92 , idx = 0 */
- {6, 5, 4, 3, 0, 4}, /* 86 , idx = 1 */
- {6, 5, 4, 2, 0, 4}, /* 81 , idx = 2 */
- {8, 7, 6, 4, 0, 6}, /* 75 , idx = 3 */
- {10, 9, 8, 6, 0, 8}, /* 71 , idx = 4 */
- {10, 9, 8, 4, 0, 8}, /* 66 , idx = 5 */
- {10, 9, 8, 2, 0, 8}, /* 62 , idx = 6 */
- {10, 9, 8, 0, 0, 8}, /* 59 , idx = 7 */
- {18, 17, 16, 8, 0, 16}, /* 53 , idx = 8 */
- {26, 25, 24, 16, 0, 24}, /* 50 , idx = 9 */
- {34, 33, 32, 24, 0, 32}, /* 47 , idx = 0x0a */
- {34, 31, 28, 20, 0, 32}, /* 43 , idx = 0x0b */
- {34, 31, 27, 18, 0, 32}, /* 40 , idx = 0x0c */
- {34, 31, 26, 16, 0, 32}, /* 37 , idx = 0x0d */
- {34, 30, 22, 16, 0, 32}, /* 32 , idx = 0x0e */
- {34, 30, 24, 16, 0, 32}, /* 26 , idx = 0x0f */
- {49, 46, 40, 16, 0, 48}, /* 20 , idx = 0x10 */
- {49, 45, 32, 0, 0, 48}, /* 17 , idx = 0x11 */
- {49, 45, 22, 18, 0, 48}, /* 15 , idx = 0x12 */
- {49, 40, 24, 16, 0, 48}, /* 12 , idx = 0x13 */
- {49, 32, 18, 12, 0, 48}, /* 9 , idx = 0x14 */
- {49, 22, 18, 14, 0, 48}, /* 6 , idx = 0x15 */
- {49, 16, 16, 0, 0, 48}
- }; /* 3, idx = 0x16 */
-
-static u8 PT_PENALTY[RETRYSIZE + 1] = {34, 31, 30, 24, 0, 32};
-
-/* wilson modify */
-static u8 RETRY_PENALTY_IDX[2][RATESIZE] = {
- {4, 4, 4, 5, 4, 4, 5, 7, 7, 7, 8, 0x0a, /* SS>TH */
- 4, 4, 4, 4, 6, 0x0a, 0x0b, 0x0d,
- 5, 5, 7, 7, 8, 0x0b, 0x0d, 0x0f}, /* 0329 R01 */
- {0x0a, 0x0a, 0x0b, 0x0c, 0x0a,
- 0x0a, 0x0b, 0x0c, 0x0d, 0x10, 0x13, 0x14, /* SS<TH */
- 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x11, 0x13, 0x15,
- 9, 9, 9, 9, 0x0c, 0x0e, 0x11, 0x13}
- };
-
-static u8 RETRY_PENALTY_UP_IDX[RATESIZE] = {
- 0x0c, 0x0d, 0x0d, 0x0f, 0x0d, 0x0e,
- 0x0f, 0x0f, 0x10, 0x12, 0x13, 0x14, /* SS>TH */
- 0x0f, 0x10, 0x10, 0x12, 0x12, 0x13, 0x14, 0x15,
- 0x11, 0x11, 0x12, 0x13, 0x13, 0x13, 0x14, 0x15};
-
-static u8 RSSI_THRESHOLD[RATESIZE] = {
- 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0x24, 0x26, 0x2a,
- 0x18, 0x1a, 0x1d, 0x1f, 0x21, 0x27, 0x29, 0x2a,
- 0, 0, 0, 0x1f, 0x23, 0x28, 0x2a, 0x2c};
-
-static u16 N_THRESHOLD_HIGH[RATESIZE] = {
- 4, 4, 8, 16,
- 24, 36, 48, 72, 96, 144, 192, 216,
- 60, 80, 100, 160, 240, 400, 560, 640,
- 300, 320, 480, 720, 1000, 1200, 1600, 2000};
-static u16 N_THRESHOLD_LOW[RATESIZE] = {
- 2, 2, 4, 8,
- 12, 18, 24, 36, 48, 72, 96, 108,
- 30, 40, 50, 80, 120, 200, 280, 320,
- 150, 160, 240, 360, 500, 600, 800, 1000};
-
-static u8 DROPING_NECESSARY[RATESIZE] = {
- 1, 1, 1, 1,
- 1, 2, 3, 4, 5, 6, 7, 8,
- 1, 2, 3, 4, 5, 6, 7, 8,
- 5, 6, 7, 8, 9, 10, 11, 12};
-
-static u8 PendingForRateUpFail[5] = {2, 10, 24, 40, 60};
-static u16 DynamicTxRPTTiming[6] = {
- 0x186a, 0x30d4, 0x493e, 0x61a8, 0x7a12, 0x927c}; /* 200ms-1200ms */
-
-/* End Rate adaptive parameters */
-
-static void odm_SetTxRPTTiming_8188E(
- struct odm_dm_struct *dm_odm,
- struct odm_ra_info *pRaInfo,
- u8 extend
- )
-{
- u8 idx = 0;
-
- for (idx = 0; idx < 5; idx++)
- if (DynamicTxRPTTiming[idx] == pRaInfo->RptTime)
- break;
-
- if (extend == 0) { /* back to default timing */
- idx = 0; /* 200ms */
- } else if (extend == 1) {/* increase the timing */
- idx += 1;
- if (idx > 5)
- idx = 5;
- } else if (extend == 2) {/* decrease the timing */
- if (idx != 0)
- idx -= 1;
- }
- pRaInfo->RptTime = DynamicTxRPTTiming[idx];
-}
-
-static int odm_RateDown_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_info *pRaInfo)
-{
- u8 RateID, LowestRate, HighestRate;
- u8 i;
-
- if (NULL == pRaInfo)
- return -1;
- RateID = pRaInfo->PreRate;
- LowestRate = pRaInfo->LowestRate;
- HighestRate = pRaInfo->HighestRate;
-
- if (RateID > HighestRate) {
- RateID = HighestRate;
- } else if (pRaInfo->RateSGI) {
- pRaInfo->RateSGI = 0;
- } else if (RateID > LowestRate) {
- if (RateID > 0) {
- for (i = RateID - 1; i > LowestRate; i--) {
- if (pRaInfo->RAUseRate & BIT(i)) {
- RateID = i;
- goto RateDownFinish;
- }
- }
- }
- } else if (RateID <= LowestRate) {
- RateID = LowestRate;
- }
-RateDownFinish:
- if (pRaInfo->RAWaitingCounter == 1) {
- pRaInfo->RAWaitingCounter += 1;
- pRaInfo->RAPendingCounter += 1;
- } else if (pRaInfo->RAWaitingCounter == 0) {
- ;
- } else {
- pRaInfo->RAWaitingCounter = 0;
- pRaInfo->RAPendingCounter = 0;
- }
-
- if (pRaInfo->RAPendingCounter >= 4)
- pRaInfo->RAPendingCounter = 4;
-
- pRaInfo->DecisionRate = RateID;
- odm_SetTxRPTTiming_8188E(dm_odm, pRaInfo, 2);
- return 0;
-}
-
-static int odm_RateUp_8188E(
- struct odm_dm_struct *dm_odm,
- struct odm_ra_info *pRaInfo
- )
-{
- u8 RateID, HighestRate;
- u8 i;
-
- if (NULL == pRaInfo)
- return -1;
- RateID = pRaInfo->PreRate;
- HighestRate = pRaInfo->HighestRate;
- if (pRaInfo->RAWaitingCounter == 1) {
- pRaInfo->RAWaitingCounter = 0;
- pRaInfo->RAPendingCounter = 0;
- } else if (pRaInfo->RAWaitingCounter > 1) {
- pRaInfo->PreRssiStaRA = pRaInfo->RssiStaRA;
- goto RateUpfinish;
- }
- odm_SetTxRPTTiming_8188E(dm_odm, pRaInfo, 0);
-
- if (RateID < HighestRate) {
- for (i = RateID + 1; i <= HighestRate; i++) {
- if (pRaInfo->RAUseRate & BIT(i)) {
- RateID = i;
- goto RateUpfinish;
- }
- }
- } else if (RateID == HighestRate) {
- if (pRaInfo->SGIEnable && (pRaInfo->RateSGI != 1))
- pRaInfo->RateSGI = 1;
- else if ((pRaInfo->SGIEnable) != 1)
- pRaInfo->RateSGI = 0;
- } else {
- RateID = HighestRate;
- }
-RateUpfinish:
- if (pRaInfo->RAWaitingCounter == (4 + PendingForRateUpFail[pRaInfo->RAPendingCounter]))
- pRaInfo->RAWaitingCounter = 0;
- else
- pRaInfo->RAWaitingCounter++;
-
- pRaInfo->DecisionRate = RateID;
- return 0;
-}
-
-static void odm_ResetRaCounter_8188E(struct odm_ra_info *pRaInfo)
-{
- u8 RateID;
-
- RateID = pRaInfo->DecisionRate;
- pRaInfo->NscUp = (N_THRESHOLD_HIGH[RateID] + N_THRESHOLD_LOW[RateID]) >> 1;
- pRaInfo->NscDown = (N_THRESHOLD_HIGH[RateID] + N_THRESHOLD_LOW[RateID]) >> 1;
-}
-
-static void odm_RateDecision_8188E(struct odm_dm_struct *dm_odm,
- struct odm_ra_info *pRaInfo
- )
-{
- u8 RateID = 0, RtyPtID = 0, PenaltyID1 = 0, PenaltyID2 = 0;
- /* u32 pool_retry; */
- static u8 DynamicTxRPTTimingCounter;
-
-	if (pRaInfo->Active && (pRaInfo->TOTAL > 0)) { /* STA in use and data packets exist */
- if ((pRaInfo->RssiStaRA < (pRaInfo->PreRssiStaRA - 3)) ||
- (pRaInfo->RssiStaRA > (pRaInfo->PreRssiStaRA + 3))) {
- pRaInfo->RAWaitingCounter = 0;
- pRaInfo->RAPendingCounter = 0;
- }
- /* Start RA decision */
- if (pRaInfo->PreRate > pRaInfo->HighestRate)
- RateID = pRaInfo->HighestRate;
- else
- RateID = pRaInfo->PreRate;
- if (pRaInfo->RssiStaRA > RSSI_THRESHOLD[RateID])
- RtyPtID = 0;
- else
- RtyPtID = 1;
- PenaltyID1 = RETRY_PENALTY_IDX[RtyPtID][RateID]; /* TODO by page */
-
- pRaInfo->NscDown += pRaInfo->RTY[0] * RETRY_PENALTY[PenaltyID1][0];
- pRaInfo->NscDown += pRaInfo->RTY[1] * RETRY_PENALTY[PenaltyID1][1];
- pRaInfo->NscDown += pRaInfo->RTY[2] * RETRY_PENALTY[PenaltyID1][2];
- pRaInfo->NscDown += pRaInfo->RTY[3] * RETRY_PENALTY[PenaltyID1][3];
- pRaInfo->NscDown += pRaInfo->RTY[4] * RETRY_PENALTY[PenaltyID1][4];
- if (pRaInfo->NscDown > (pRaInfo->TOTAL * RETRY_PENALTY[PenaltyID1][5]))
- pRaInfo->NscDown -= pRaInfo->TOTAL * RETRY_PENALTY[PenaltyID1][5];
- else
- pRaInfo->NscDown = 0;
-
- /* rate up */
- PenaltyID2 = RETRY_PENALTY_UP_IDX[RateID];
- pRaInfo->NscUp += pRaInfo->RTY[0] * RETRY_PENALTY[PenaltyID2][0];
- pRaInfo->NscUp += pRaInfo->RTY[1] * RETRY_PENALTY[PenaltyID2][1];
- pRaInfo->NscUp += pRaInfo->RTY[2] * RETRY_PENALTY[PenaltyID2][2];
- pRaInfo->NscUp += pRaInfo->RTY[3] * RETRY_PENALTY[PenaltyID2][3];
- pRaInfo->NscUp += pRaInfo->RTY[4] * RETRY_PENALTY[PenaltyID2][4];
- if (pRaInfo->NscUp > (pRaInfo->TOTAL * RETRY_PENALTY[PenaltyID2][5]))
- pRaInfo->NscUp -= pRaInfo->TOTAL * RETRY_PENALTY[PenaltyID2][5];
- else
- pRaInfo->NscUp = 0;
-
- if ((pRaInfo->NscDown < N_THRESHOLD_LOW[RateID]) ||
- (pRaInfo->DROP > DROPING_NECESSARY[RateID]))
- odm_RateDown_8188E(dm_odm, pRaInfo);
- else if (pRaInfo->NscUp > N_THRESHOLD_HIGH[RateID])
- odm_RateUp_8188E(dm_odm, pRaInfo);
-
- if (pRaInfo->DecisionRate > pRaInfo->HighestRate)
- pRaInfo->DecisionRate = pRaInfo->HighestRate;
-
- if ((pRaInfo->DecisionRate) == (pRaInfo->PreRate))
- DynamicTxRPTTimingCounter += 1;
- else
- DynamicTxRPTTimingCounter = 0;
-
- if (DynamicTxRPTTimingCounter >= 4) {
- odm_SetTxRPTTiming_8188E(dm_odm, pRaInfo, 1);
- DynamicTxRPTTimingCounter = 0;
- }
-
- pRaInfo->PreRate = pRaInfo->DecisionRate; /* YJ, add, 120120 */
-
- odm_ResetRaCounter_8188E(pRaInfo);
- }
-}
-
-static int odm_ARFBRefresh_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_info *pRaInfo)
-{ /* Wilson 2011/10/26 */
- u32 MaskFromReg;
- s8 i;
- int res;
-
- switch (pRaInfo->RateID) {
- case RATR_INX_WIRELESS_NGB:
- pRaInfo->RAUseRate = (pRaInfo->RateMask) & 0x0f8ff015;
- break;
- case RATR_INX_WIRELESS_NG:
- pRaInfo->RAUseRate = (pRaInfo->RateMask) & 0x0f8ff010;
- break;
- case RATR_INX_WIRELESS_NB:
- pRaInfo->RAUseRate = (pRaInfo->RateMask) & 0x0f8ff005;
- break;
- case RATR_INX_WIRELESS_N:
- pRaInfo->RAUseRate = (pRaInfo->RateMask) & 0x0f8ff000;
- break;
- case RATR_INX_WIRELESS_GB:
- pRaInfo->RAUseRate = (pRaInfo->RateMask) & 0x00000ff5;
- break;
- case RATR_INX_WIRELESS_G:
- pRaInfo->RAUseRate = (pRaInfo->RateMask) & 0x00000ff0;
- break;
- case RATR_INX_WIRELESS_B:
- pRaInfo->RAUseRate = (pRaInfo->RateMask) & 0x0000000d;
- break;
- case 12:
- res = rtw_read32(dm_odm->Adapter, REG_ARFR0, &MaskFromReg);
- if (res)
- return res;
-
- pRaInfo->RAUseRate = (pRaInfo->RateMask) & MaskFromReg;
- break;
- case 13:
- res = rtw_read32(dm_odm->Adapter, REG_ARFR1, &MaskFromReg);
- if (res)
- return res;
-
- pRaInfo->RAUseRate = (pRaInfo->RateMask) & MaskFromReg;
- break;
- case 14:
- res = rtw_read32(dm_odm->Adapter, REG_ARFR2, &MaskFromReg);
- if (res)
- return res;
-
- pRaInfo->RAUseRate = (pRaInfo->RateMask) & MaskFromReg;
- break;
- case 15:
- res = rtw_read32(dm_odm->Adapter, REG_ARFR3, &MaskFromReg);
- if (res)
- return res;
-
- pRaInfo->RAUseRate = (pRaInfo->RateMask) & MaskFromReg;
- break;
- default:
- pRaInfo->RAUseRate = (pRaInfo->RateMask);
- break;
- }
- /* Highest rate */
- if (pRaInfo->RAUseRate) {
- for (i = RATESIZE; i >= 0; i--) {
- if ((pRaInfo->RAUseRate) & BIT(i)) {
- pRaInfo->HighestRate = i;
- break;
- }
- }
- } else {
- pRaInfo->HighestRate = 0;
- }
- /* Lowest rate */
- if (pRaInfo->RAUseRate) {
- for (i = 0; i < RATESIZE; i++) {
- if ((pRaInfo->RAUseRate) & BIT(i)) {
- pRaInfo->LowestRate = i;
- break;
- }
- }
- } else {
- pRaInfo->LowestRate = 0;
- }
- if (pRaInfo->HighestRate > 0x13)
- pRaInfo->PTModeSS = 3;
- else if (pRaInfo->HighestRate > 0x0b)
- pRaInfo->PTModeSS = 2;
- else if (pRaInfo->HighestRate > 0x03)
- pRaInfo->PTModeSS = 1;
- else
- pRaInfo->PTModeSS = 0;
-
- if (pRaInfo->DecisionRate > pRaInfo->HighestRate)
- pRaInfo->DecisionRate = pRaInfo->HighestRate;
-
- return 0;
-}
-
-static void odm_PTTryState_8188E(struct odm_ra_info *pRaInfo)
-{
- pRaInfo->PTTryState = 0;
- switch (pRaInfo->PTModeSS) {
- case 3:
- if (pRaInfo->DecisionRate >= 0x19)
- pRaInfo->PTTryState = 1;
- break;
- case 2:
- if (pRaInfo->DecisionRate >= 0x11)
- pRaInfo->PTTryState = 1;
- break;
- case 1:
- if (pRaInfo->DecisionRate >= 0x0a)
- pRaInfo->PTTryState = 1;
- break;
- case 0:
- if (pRaInfo->DecisionRate >= 0x03)
- pRaInfo->PTTryState = 1;
- break;
- default:
- pRaInfo->PTTryState = 0;
- break;
- }
-
- if (pRaInfo->RssiStaRA < 48) {
- pRaInfo->PTStage = 0;
- } else if (pRaInfo->PTTryState == 1) {
- if ((pRaInfo->PTStopCount >= 10) ||
- (pRaInfo->PTPreRssi > pRaInfo->RssiStaRA + 5) ||
- (pRaInfo->PTPreRssi < pRaInfo->RssiStaRA - 5) ||
- (pRaInfo->DecisionRate != pRaInfo->PTPreRate)) {
- if (pRaInfo->PTStage == 0)
- pRaInfo->PTStage = 1;
- else if (pRaInfo->PTStage == 1)
- pRaInfo->PTStage = 3;
- else
- pRaInfo->PTStage = 5;
-
- pRaInfo->PTPreRssi = pRaInfo->RssiStaRA;
- pRaInfo->PTStopCount = 0;
- } else {
- pRaInfo->RAstage = 0;
- pRaInfo->PTStopCount++;
- }
- } else {
- pRaInfo->PTStage = 0;
- pRaInfo->RAstage = 0;
- }
- pRaInfo->PTPreRate = pRaInfo->DecisionRate;
-}
-
-static void odm_PTDecision_8188E(struct odm_ra_info *pRaInfo)
-{
- u8 j;
- u8 temp_stage;
- u32 numsc;
- u32 num_total;
- u8 stage_id;
-
- numsc = 0;
- num_total = pRaInfo->TOTAL * PT_PENALTY[5];
- for (j = 0; j <= 4; j++) {
- numsc += pRaInfo->RTY[j] * PT_PENALTY[j];
- if (numsc > num_total)
- break;
- }
-
- j = j >> 1;
- temp_stage = (pRaInfo->PTStage + 1) >> 1;
- if (temp_stage > j)
- stage_id = temp_stage - j;
- else
- stage_id = 0;
-
- pRaInfo->PTSmoothFactor = (pRaInfo->PTSmoothFactor >> 1) + (pRaInfo->PTSmoothFactor >> 2) + stage_id * 16 + 2;
- if (pRaInfo->PTSmoothFactor > 192)
- pRaInfo->PTSmoothFactor = 192;
- stage_id = pRaInfo->PTSmoothFactor >> 6;
- temp_stage = stage_id * 2;
- if (temp_stage != 0)
- temp_stage -= 1;
- if (pRaInfo->DROP > 3)
- temp_stage = 0;
- pRaInfo->PTStage = temp_stage;
-}
-
-static void
-odm_RATxRPTTimerSetting(
- struct odm_dm_struct *dm_odm,
- u16 minRptTime
-)
-{
- if (dm_odm->CurrminRptTime != minRptTime) {
- rtw_rpt_timer_cfg_cmd(dm_odm->Adapter, minRptTime);
- dm_odm->CurrminRptTime = minRptTime;
- }
-}
-
-int ODM_RAInfo_Init(struct odm_dm_struct *dm_odm, u8 macid)
-{
- struct odm_ra_info *pRaInfo = &dm_odm->RAInfo[macid];
- u8 WirelessMode = 0xFF; /* invalid value */
- u8 max_rate_idx = 0x13; /* MCS7 */
- if (dm_odm->pWirelessMode)
- WirelessMode = *dm_odm->pWirelessMode;
-
- if (WirelessMode != 0xFF) {
- if (WirelessMode & ODM_WM_N24G)
- max_rate_idx = 0x13;
- else if (WirelessMode & ODM_WM_G)
- max_rate_idx = 0x0b;
- else if (WirelessMode & ODM_WM_B)
- max_rate_idx = 0x03;
- }
-
- pRaInfo->DecisionRate = max_rate_idx;
- pRaInfo->PreRate = max_rate_idx;
- pRaInfo->HighestRate = max_rate_idx;
- pRaInfo->LowestRate = 0;
- pRaInfo->RateID = 0;
- pRaInfo->RateMask = 0xffffffff;
- pRaInfo->RssiStaRA = 0;
- pRaInfo->PreRssiStaRA = 0;
- pRaInfo->SGIEnable = 0;
- pRaInfo->RAUseRate = 0xffffffff;
- pRaInfo->NscDown = (N_THRESHOLD_HIGH[0x13] + N_THRESHOLD_LOW[0x13]) / 2;
- pRaInfo->NscUp = (N_THRESHOLD_HIGH[0x13] + N_THRESHOLD_LOW[0x13]) / 2;
- pRaInfo->RateSGI = 0;
- pRaInfo->Active = 1; /* Active is not used at present. by page, 110819 */
- pRaInfo->RptTime = 0x927c;
- pRaInfo->DROP = 0;
- pRaInfo->RTY[0] = 0;
- pRaInfo->RTY[1] = 0;
- pRaInfo->RTY[2] = 0;
- pRaInfo->RTY[3] = 0;
- pRaInfo->RTY[4] = 0;
- pRaInfo->TOTAL = 0;
- pRaInfo->RAWaitingCounter = 0;
- pRaInfo->RAPendingCounter = 0;
-	pRaInfo->PTActive = 1; /* Active while this STA is in use */
- pRaInfo->PTTryState = 0;
- pRaInfo->PTStage = 5; /* Need to fill into HW_PWR_STATUS */
- pRaInfo->PTSmoothFactor = 192;
- pRaInfo->PTStopCount = 0;
- pRaInfo->PTPreRate = 0;
- pRaInfo->PTPreRssi = 0;
- pRaInfo->PTModeSS = 0;
- pRaInfo->RAstage = 0;
- return 0;
-}
-
-int ODM_RAInfo_Init_all(struct odm_dm_struct *dm_odm)
-{
- u8 macid = 0;
-
- dm_odm->CurrminRptTime = 0;
-
- for (macid = 0; macid < ODM_ASSOCIATE_ENTRY_NUM; macid++)
- ODM_RAInfo_Init(dm_odm, macid);
-
- return 0;
-}
-
-u8 ODM_RA_GetShortGI_8188E(struct odm_dm_struct *dm_odm, u8 macid)
-{
- if ((NULL == dm_odm) || (macid >= ODM_ASSOCIATE_ENTRY_NUM))
- return 0;
- return dm_odm->RAInfo[macid].RateSGI;
-}
-
-u8 ODM_RA_GetDecisionRate_8188E(struct odm_dm_struct *dm_odm, u8 macid)
-{
- u8 DecisionRate = 0;
-
- if ((NULL == dm_odm) || (macid >= ODM_ASSOCIATE_ENTRY_NUM))
- return 0;
- DecisionRate = (dm_odm->RAInfo[macid].DecisionRate);
- return DecisionRate;
-}
-
-u8 ODM_RA_GetHwPwrStatus_8188E(struct odm_dm_struct *dm_odm, u8 macid)
-{
- u8 PTStage = 5;
-
- if ((NULL == dm_odm) || (macid >= ODM_ASSOCIATE_ENTRY_NUM))
- return 0;
- PTStage = (dm_odm->RAInfo[macid].PTStage);
- return PTStage;
-}
-
-void ODM_RA_UpdateRateInfo_8188E(struct odm_dm_struct *dm_odm, u8 macid, u8 RateID, u32 RateMask, u8 SGIEnable)
-{
- struct odm_ra_info *pRaInfo = NULL;
-
- if ((NULL == dm_odm) || (macid >= ODM_ASSOCIATE_ENTRY_NUM))
- return;
-
- pRaInfo = &dm_odm->RAInfo[macid];
- pRaInfo->RateID = RateID;
- pRaInfo->RateMask = RateMask;
- pRaInfo->SGIEnable = SGIEnable;
- odm_ARFBRefresh_8188E(dm_odm, pRaInfo);
-}
-
-void ODM_RA_SetRSSI_8188E(struct odm_dm_struct *dm_odm, u8 macid, u8 Rssi)
-{
- struct odm_ra_info *pRaInfo = NULL;
-
- if ((NULL == dm_odm) || (macid >= ODM_ASSOCIATE_ENTRY_NUM))
- return;
-
- pRaInfo = &dm_odm->RAInfo[macid];
- pRaInfo->RssiStaRA = Rssi;
-}
-
-void ODM_RA_Set_TxRPT_Time(struct odm_dm_struct *dm_odm, u16 minRptTime)
-{
- rtw_write16(dm_odm->Adapter, REG_TX_RPT_TIME, minRptTime);
-}
-
-void ODM_RA_TxRPT2Handle_8188E(struct odm_dm_struct *dm_odm, u8 *TxRPT_Buf, u16 TxRPT_Len, u32 macid_entry0, u32 macid_entry1)
-{
- struct odm_ra_info *pRAInfo = NULL;
- u8 MacId = 0;
- u8 *pBuffer = NULL;
- u32 valid = 0, ItemNum = 0;
- u16 minRptTime = 0x927c;
-
- ItemNum = TxRPT_Len >> 3;
- pBuffer = TxRPT_Buf;
-
- do {
- if (MacId >= ODM_ASSOCIATE_ENTRY_NUM)
- valid = 0;
- else if (MacId >= 32)
- valid = (1 << (MacId - 32)) & macid_entry1;
- else
- valid = (1 << MacId) & macid_entry0;
-
- pRAInfo = &dm_odm->RAInfo[MacId];
- if (valid) {
- pRAInfo->RTY[0] = le16_to_cpup((__le16 *)pBuffer);
- pRAInfo->RTY[1] = pBuffer[2];
- pRAInfo->RTY[2] = pBuffer[3];
- pRAInfo->RTY[3] = pBuffer[4];
- pRAInfo->RTY[4] = pBuffer[5];
- pRAInfo->DROP = pBuffer[6];
- pRAInfo->TOTAL = pRAInfo->RTY[0] + pRAInfo->RTY[1] +
- pRAInfo->RTY[2] + pRAInfo->RTY[3] +
- pRAInfo->RTY[4] + pRAInfo->DROP;
- if (pRAInfo->TOTAL != 0) {
- if (pRAInfo->PTActive) {
- if (pRAInfo->RAstage < 5)
- odm_RateDecision_8188E(dm_odm, pRAInfo);
- else if (pRAInfo->RAstage == 5) /* Power training try state */
- odm_PTTryState_8188E(pRAInfo);
- else /* RAstage == 6 */
- odm_PTDecision_8188E(pRAInfo);
-
- /* Stage_RA counter */
- if (pRAInfo->RAstage <= 5)
- pRAInfo->RAstage++;
- else
- pRAInfo->RAstage = 0;
- } else {
- odm_RateDecision_8188E(dm_odm, pRAInfo);
- }
- }
- }
-
- if (minRptTime > pRAInfo->RptTime)
- minRptTime = pRAInfo->RptTime;
-
- pBuffer += TX_RPT2_ITEM_SIZE;
- MacId++;
- } while (MacId < ItemNum);
-
- odm_RATxRPTTimerSetting(dm_odm, minRptTime);
-}
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
deleted file mode 100644
index 23b7205722b5..000000000000
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
+++ /dev/null
@@ -1,733 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#include "../include/rtw_iol.h"
-
-#define read_next_pair(array, v1, v2, i) \
- do { \
- i += 2; \
- v1 = array[i]; \
- v2 = array[i + 1]; \
- } while (0)
-
-static bool CheckCondition(const u32 condition, const u32 hex)
-{
- u32 _interface = (hex & 0x0000FF00) >> 8;
- u32 _platform = (hex & 0x00FF0000) >> 16;
- u32 cond = condition;
-
- if (condition == 0xCDCDCDCD)
- return true;
-
- cond = condition & 0x0000FF00;
- cond = cond >> 8;
- if ((_interface & cond) == 0 && cond != 0x07)
- return false;
-
- cond = condition & 0x00FF0000;
- cond = cond >> 16;
- if ((_platform & cond) == 0 && cond != 0x0F)
- return false;
- return true;
-}
-
-/******************************************************************************
-* AGC_TAB_1T.TXT
-******************************************************************************/
-
-static u32 array_agc_tab_1t_8188e[] = {
- 0xC78, 0xFB000001,
- 0xC78, 0xFB010001,
- 0xC78, 0xFB020001,
- 0xC78, 0xFB030001,
- 0xC78, 0xFB040001,
- 0xC78, 0xFB050001,
- 0xC78, 0xFA060001,
- 0xC78, 0xF9070001,
- 0xC78, 0xF8080001,
- 0xC78, 0xF7090001,
- 0xC78, 0xF60A0001,
- 0xC78, 0xF50B0001,
- 0xC78, 0xF40C0001,
- 0xC78, 0xF30D0001,
- 0xC78, 0xF20E0001,
- 0xC78, 0xF10F0001,
- 0xC78, 0xF0100001,
- 0xC78, 0xEF110001,
- 0xC78, 0xEE120001,
- 0xC78, 0xED130001,
- 0xC78, 0xEC140001,
- 0xC78, 0xEB150001,
- 0xC78, 0xEA160001,
- 0xC78, 0xE9170001,
- 0xC78, 0xE8180001,
- 0xC78, 0xE7190001,
- 0xC78, 0xE61A0001,
- 0xC78, 0xE51B0001,
- 0xC78, 0xE41C0001,
- 0xC78, 0xE31D0001,
- 0xC78, 0xE21E0001,
- 0xC78, 0xE11F0001,
- 0xC78, 0x8A200001,
- 0xC78, 0x89210001,
- 0xC78, 0x88220001,
- 0xC78, 0x87230001,
- 0xC78, 0x86240001,
- 0xC78, 0x85250001,
- 0xC78, 0x84260001,
- 0xC78, 0x83270001,
- 0xC78, 0x82280001,
- 0xC78, 0x6B290001,
- 0xC78, 0x6A2A0001,
- 0xC78, 0x692B0001,
- 0xC78, 0x682C0001,
- 0xC78, 0x672D0001,
- 0xC78, 0x662E0001,
- 0xC78, 0x652F0001,
- 0xC78, 0x64300001,
- 0xC78, 0x63310001,
- 0xC78, 0x62320001,
- 0xC78, 0x61330001,
- 0xC78, 0x46340001,
- 0xC78, 0x45350001,
- 0xC78, 0x44360001,
- 0xC78, 0x43370001,
- 0xC78, 0x42380001,
- 0xC78, 0x41390001,
- 0xC78, 0x403A0001,
- 0xC78, 0x403B0001,
- 0xC78, 0x403C0001,
- 0xC78, 0x403D0001,
- 0xC78, 0x403E0001,
- 0xC78, 0x403F0001,
- 0xC78, 0xFB400001,
- 0xC78, 0xFB410001,
- 0xC78, 0xFB420001,
- 0xC78, 0xFB430001,
- 0xC78, 0xFB440001,
- 0xC78, 0xFB450001,
- 0xC78, 0xFB460001,
- 0xC78, 0xFB470001,
- 0xC78, 0xFB480001,
- 0xC78, 0xFA490001,
- 0xC78, 0xF94A0001,
- 0xC78, 0xF84B0001,
- 0xC78, 0xF74C0001,
- 0xC78, 0xF64D0001,
- 0xC78, 0xF54E0001,
- 0xC78, 0xF44F0001,
- 0xC78, 0xF3500001,
- 0xC78, 0xF2510001,
- 0xC78, 0xF1520001,
- 0xC78, 0xF0530001,
- 0xC78, 0xEF540001,
- 0xC78, 0xEE550001,
- 0xC78, 0xED560001,
- 0xC78, 0xEC570001,
- 0xC78, 0xEB580001,
- 0xC78, 0xEA590001,
- 0xC78, 0xE95A0001,
- 0xC78, 0xE85B0001,
- 0xC78, 0xE75C0001,
- 0xC78, 0xE65D0001,
- 0xC78, 0xE55E0001,
- 0xC78, 0xE45F0001,
- 0xC78, 0xE3600001,
- 0xC78, 0xE2610001,
- 0xC78, 0xC3620001,
- 0xC78, 0xC2630001,
- 0xC78, 0xC1640001,
- 0xC78, 0x8B650001,
- 0xC78, 0x8A660001,
- 0xC78, 0x89670001,
- 0xC78, 0x88680001,
- 0xC78, 0x87690001,
- 0xC78, 0x866A0001,
- 0xC78, 0x856B0001,
- 0xC78, 0x846C0001,
- 0xC78, 0x676D0001,
- 0xC78, 0x666E0001,
- 0xC78, 0x656F0001,
- 0xC78, 0x64700001,
- 0xC78, 0x63710001,
- 0xC78, 0x62720001,
- 0xC78, 0x61730001,
- 0xC78, 0x60740001,
- 0xC78, 0x46750001,
- 0xC78, 0x45760001,
- 0xC78, 0x44770001,
- 0xC78, 0x43780001,
- 0xC78, 0x42790001,
- 0xC78, 0x417A0001,
- 0xC78, 0x407B0001,
- 0xC78, 0x407C0001,
- 0xC78, 0x407D0001,
- 0xC78, 0x407E0001,
- 0xC78, 0x407F0001,
-};
-
-static void odm_ConfigBB_AGC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data)
-{
- rtl8188e_PHY_SetBBReg(pDM_Odm->Adapter, Addr, Bitmask, Data);
- /* Add 1us delay between BB/RF register setting. */
- udelay(1);
-}
-
-int ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *dm_odm)
-{
- u32 hex = 0;
- u32 i = 0;
- u32 arraylen = ARRAY_SIZE(array_agc_tab_1t_8188e);
- u32 *array = array_agc_tab_1t_8188e;
- bool biol = false;
- struct adapter *adapter = dm_odm->Adapter;
- struct xmit_frame *pxmit_frame = NULL;
- u8 bndy_cnt = 1;
-
- hex += ODM_ITRF_USB << 8;
- hex += ODM_CE << 16;
- hex += 0xFF000000;
- biol = rtw_IOL_applied(adapter);
-
- if (biol) {
- pxmit_frame = rtw_IOL_accquire_xmit_frame(adapter);
- if (!pxmit_frame) {
- pr_info("rtw_IOL_accquire_xmit_frame failed\n");
- return -ENOMEM;
- }
- }
-
- for (i = 0; i < arraylen; i += 2) {
- u32 v1 = array[i];
- u32 v2 = array[i + 1];
-
- /* This (offset, data) pair meets the condition. */
- if (v1 < 0xCDCDCDCD) {
- if (biol) {
- if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
- bndy_cnt++;
- rtw_IOL_append_WD_cmd(pxmit_frame, (u16)v1, v2, bMaskDWord);
- } else {
- odm_ConfigBB_AGC_8188E(dm_odm, v1, bMaskDWord, v2);
- }
- continue;
- } else {
- /* This line is the start line of branch. */
- if (!CheckCondition(array[i], hex)) {
- /* Discard the following (offset, data) pairs. */
- read_next_pair(array, v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < arraylen - 2)
- read_next_pair(array, v1, v2, i);
- i -= 2; /* prevent from for-loop += 2 */
- } else { /* Configure matched pairs and skip to end of if-else. */
- read_next_pair(array, v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < arraylen - 2) {
- if (biol) {
- if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
- bndy_cnt++;
- rtw_IOL_append_WD_cmd(pxmit_frame, (u16)v1, v2, bMaskDWord);
- } else {
- odm_ConfigBB_AGC_8188E(dm_odm, v1, bMaskDWord, v2);
- }
- read_next_pair(array, v1, v2, i);
- }
-
- while (v2 != 0xDEAD && i < arraylen - 2)
- read_next_pair(array, v1, v2, i);
- }
- }
- }
- if (biol) {
- if (!rtl8188e_IOL_exec_cmds_sync(dm_odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
- printk("~~~ %s IOL_exec_cmds Failed !!!\n", __func__);
- return -1;
- }
- }
- return 0;
-}
-
-/******************************************************************************
-* PHY_REG_1T.TXT
-******************************************************************************/
-
-static u32 array_phy_reg_1t_8188e[] = {
- 0x800, 0x80040000,
- 0x804, 0x00000003,
- 0x808, 0x0000FC00,
- 0x80C, 0x0000000A,
- 0x810, 0x10001331,
- 0x814, 0x020C3D10,
- 0x818, 0x02200385,
- 0x81C, 0x00000000,
- 0x820, 0x01000100,
- 0x824, 0x00390204,
- 0x828, 0x00000000,
- 0x82C, 0x00000000,
- 0x830, 0x00000000,
- 0x834, 0x00000000,
- 0x838, 0x00000000,
- 0x83C, 0x00000000,
- 0x840, 0x00010000,
- 0x844, 0x00000000,
- 0x848, 0x00000000,
- 0x84C, 0x00000000,
- 0x850, 0x00000000,
- 0x854, 0x00000000,
- 0x858, 0x569A11A9,
- 0x85C, 0x01000014,
- 0x860, 0x66F60110,
- 0x864, 0x061F0649,
- 0x868, 0x00000000,
- 0x86C, 0x27272700,
- 0x870, 0x07000760,
- 0x874, 0x25004000,
- 0x878, 0x00000808,
- 0x87C, 0x00000000,
- 0x880, 0xB0000C1C,
- 0x884, 0x00000001,
- 0x888, 0x00000000,
- 0x88C, 0xCCC000C0,
- 0x890, 0x00000800,
- 0x894, 0xFFFFFFFE,
- 0x898, 0x40302010,
- 0x89C, 0x00706050,
- 0x900, 0x00000000,
- 0x904, 0x00000023,
- 0x908, 0x00000000,
- 0x90C, 0x81121111,
- 0x910, 0x00000002,
- 0x914, 0x00000201,
- 0xA00, 0x00D047C8,
- 0xA04, 0x80FF000C,
- 0xA08, 0x8C838300,
- 0xA0C, 0x2E7F120F,
- 0xA10, 0x9500BB78,
- 0xA14, 0x1114D028,
- 0xA18, 0x00881117,
- 0xA1C, 0x89140F00,
- 0xA20, 0x1A1B0000,
- 0xA24, 0x090E1317,
- 0xA28, 0x00000204,
- 0xA2C, 0x00D30000,
- 0xA70, 0x101FBF00,
- 0xA74, 0x00000007,
- 0xA78, 0x00000900,
- 0xA7C, 0x225B0606,
- 0xA80, 0x218075B1,
- 0xB2C, 0x80000000,
- 0xC00, 0x48071D40,
- 0xC04, 0x03A05611,
- 0xC08, 0x000000E4,
- 0xC0C, 0x6C6C6C6C,
- 0xC10, 0x08800000,
- 0xC14, 0x40000100,
- 0xC18, 0x08800000,
- 0xC1C, 0x40000100,
- 0xC20, 0x00000000,
- 0xC24, 0x00000000,
- 0xC28, 0x00000000,
- 0xC2C, 0x00000000,
- 0xC30, 0x69E9AC47,
- 0xC34, 0x469652AF,
- 0xC38, 0x49795994,
- 0xC3C, 0x0A97971C,
- 0xC40, 0x1F7C403F,
- 0xC44, 0x000100B7,
- 0xC48, 0xEC020107,
- 0xC4C, 0x007F037F,
- 0xC50, 0x69553420,
- 0xC54, 0x43BC0094,
- 0xC58, 0x00013169,
- 0xC5C, 0x00250492,
- 0xC60, 0x00000000,
- 0xC64, 0x7112848B,
- 0xC68, 0x47C00BFF,
- 0xC6C, 0x00000036,
- 0xC70, 0x2C7F000D,
- 0xC74, 0x020610DB,
- 0xC78, 0x0000001F,
- 0xC7C, 0x00B91612,
- 0xC80, 0x390000E4,
- 0xC84, 0x20F60000,
- 0xC88, 0x40000100,
- 0xC8C, 0x20200000,
- 0xC90, 0x00091521,
- 0xC94, 0x00000000,
- 0xC98, 0x00121820,
- 0xC9C, 0x00007F7F,
- 0xCA0, 0x00000000,
- 0xCA4, 0x000300A0,
- 0xCA8, 0x00000000,
- 0xCAC, 0x00000000,
- 0xCB0, 0x00000000,
- 0xCB4, 0x00000000,
- 0xCB8, 0x00000000,
- 0xCBC, 0x28000000,
- 0xCC0, 0x00000000,
- 0xCC4, 0x00000000,
- 0xCC8, 0x00000000,
- 0xCCC, 0x00000000,
- 0xCD0, 0x00000000,
- 0xCD4, 0x00000000,
- 0xCD8, 0x64B22427,
- 0xCDC, 0x00766932,
- 0xCE0, 0x00222222,
- 0xCE4, 0x00000000,
- 0xCE8, 0x37644302,
- 0xCEC, 0x2F97D40C,
- 0xD00, 0x00000740,
- 0xD04, 0x00020401,
- 0xD08, 0x0000907F,
- 0xD0C, 0x20010201,
- 0xD10, 0xA0633333,
- 0xD14, 0x3333BC43,
- 0xD18, 0x7A8F5B6F,
- 0xD2C, 0xCC979975,
- 0xD30, 0x00000000,
- 0xD34, 0x80608000,
- 0xD38, 0x00000000,
- 0xD3C, 0x00127353,
- 0xD40, 0x00000000,
- 0xD44, 0x00000000,
- 0xD48, 0x00000000,
- 0xD4C, 0x00000000,
- 0xD50, 0x6437140A,
- 0xD54, 0x00000000,
- 0xD58, 0x00000282,
- 0xD5C, 0x30032064,
- 0xD60, 0x4653DE68,
- 0xD64, 0x04518A3C,
- 0xD68, 0x00002101,
- 0xD6C, 0x2A201C16,
- 0xD70, 0x1812362E,
- 0xD74, 0x322C2220,
- 0xD78, 0x000E3C24,
- 0xE00, 0x2D2D2D2D,
- 0xE04, 0x2D2D2D2D,
- 0xE08, 0x0390272D,
- 0xE10, 0x2D2D2D2D,
- 0xE14, 0x2D2D2D2D,
- 0xE18, 0x2D2D2D2D,
- 0xE1C, 0x2D2D2D2D,
- 0xE28, 0x00000000,
- 0xE30, 0x1000DC1F,
- 0xE34, 0x10008C1F,
- 0xE38, 0x02140102,
- 0xE3C, 0x681604C2,
- 0xE40, 0x01007C00,
- 0xE44, 0x01004800,
- 0xE48, 0xFB000000,
- 0xE4C, 0x000028D1,
- 0xE50, 0x1000DC1F,
- 0xE54, 0x10008C1F,
- 0xE58, 0x02140102,
- 0xE5C, 0x28160D05,
- 0xE60, 0x00000008,
- 0xE68, 0x001B25A4,
- 0xE6C, 0x00C00014,
- 0xE70, 0x00C00014,
- 0xE74, 0x01000014,
- 0xE78, 0x01000014,
- 0xE7C, 0x01000014,
- 0xE80, 0x01000014,
- 0xE84, 0x00C00014,
- 0xE88, 0x01000014,
- 0xE8C, 0x00C00014,
- 0xED0, 0x00C00014,
- 0xED4, 0x00C00014,
- 0xED8, 0x00C00014,
- 0xEDC, 0x00000014,
- 0xEE0, 0x00000014,
- 0xEEC, 0x01C00014,
- 0xF14, 0x00000003,
- 0xF4C, 0x00000000,
- 0xF00, 0x00000300,
-};
-
-static void odm_ConfigBB_PHY_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data)
-{
- if (Addr == 0xfe) {
- msleep(50);
- } else if (Addr == 0xfd) {
- mdelay(5);
- } else if (Addr == 0xfc) {
- mdelay(1);
- } else if (Addr == 0xfb) {
- udelay(50);
- } else if (Addr == 0xfa) {
- udelay(5);
- } else if (Addr == 0xf9) {
- udelay(1);
- } else {
- if (Addr == 0xa24)
- pDM_Odm->RFCalibrateInfo.RegA24 = Data;
- rtl8188e_PHY_SetBBReg(pDM_Odm->Adapter, Addr, Bitmask, Data);
-
- /* Add 1us delay between BB/RF register setting. */
- udelay(1);
- }
-}
-
-int ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *dm_odm)
-{
- u32 hex = 0;
- u32 i = 0;
- u32 arraylen = ARRAY_SIZE(array_phy_reg_1t_8188e);
- u32 *array = array_phy_reg_1t_8188e;
- bool biol = false;
- struct adapter *adapter = dm_odm->Adapter;
- struct xmit_frame *pxmit_frame = NULL;
- u8 bndy_cnt = 1;
- hex += ODM_ITRF_USB << 8;
- hex += ODM_CE << 16;
- hex += 0xFF000000;
- biol = rtw_IOL_applied(adapter);
-
- if (biol) {
- pxmit_frame = rtw_IOL_accquire_xmit_frame(adapter);
- if (!pxmit_frame) {
- pr_info("rtw_IOL_accquire_xmit_frame failed\n");
- return -ENOMEM;
- }
- }
-
- for (i = 0; i < arraylen; i += 2) {
- u32 v1 = array[i];
- u32 v2 = array[i + 1];
-
- /* This (offset, data) pair meets the condition. */
- if (v1 < 0xCDCDCDCD) {
- if (biol) {
- if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
- bndy_cnt++;
- if (v1 == 0xfe) {
- rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 50);
- } else if (v1 == 0xfd) {
- rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 5);
- } else if (v1 == 0xfc) {
- rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 1);
- } else if (v1 == 0xfb) {
- rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 50);
- } else if (v1 == 0xfa) {
- rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 5);
- } else if (v1 == 0xf9) {
- rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 1);
- } else {
- if (v1 == 0xa24)
- dm_odm->RFCalibrateInfo.RegA24 = v2;
- rtw_IOL_append_WD_cmd(pxmit_frame, (u16)v1, v2, bMaskDWord);
- }
- } else {
- odm_ConfigBB_PHY_8188E(dm_odm, v1, bMaskDWord, v2);
- }
- continue;
- } else { /* This line is the start line of branch. */
- if (!CheckCondition(array[i], hex)) {
- /* Discard the following (offset, data) pairs. */
- read_next_pair(array, v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < arraylen - 2)
- read_next_pair(array, v1, v2, i);
- i -= 2; /* prevent from for-loop += 2 */
- } else { /* Configure matched pairs and skip to end of if-else. */
- read_next_pair(array, v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < arraylen - 2) {
- if (biol) {
- if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
- bndy_cnt++;
- if (v1 == 0xfe) {
- rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 50);
- } else if (v1 == 0xfd) {
- rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 5);
- } else if (v1 == 0xfc) {
- rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 1);
- } else if (v1 == 0xfb) {
- rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 50);
- } else if (v1 == 0xfa) {
- rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 5);
- } else if (v1 == 0xf9) {
- rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 1);
- } else {
- if (v1 == 0xa24)
- dm_odm->RFCalibrateInfo.RegA24 = v2;
-
- rtw_IOL_append_WD_cmd(pxmit_frame, (u16)v1, v2, bMaskDWord);
- }
- } else {
- odm_ConfigBB_PHY_8188E(dm_odm, v1, bMaskDWord, v2);
- }
- read_next_pair(array, v1, v2, i);
- }
-
- while (v2 != 0xDEAD && i < arraylen - 2)
- read_next_pair(array, v1, v2, i);
- }
- }
- }
- if (biol) {
- if (!rtl8188e_IOL_exec_cmds_sync(dm_odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
- pr_info("~~~ IOL Config %s Failed !!!\n", __func__);
- return -1;
- }
- }
- return 0;
-}
-
-/******************************************************************************
-* PHY_REG_PG.TXT
-******************************************************************************/
-
-static u32 array_phy_reg_pg_8188e[] = {
- 0xE00, 0xFFFFFFFF, 0x06070809,
- 0xE04, 0xFFFFFFFF, 0x02020405,
- 0xE08, 0x0000FF00, 0x00000006,
- 0x86C, 0xFFFFFF00, 0x00020400,
- 0xE10, 0xFFFFFFFF, 0x08090A0B,
- 0xE14, 0xFFFFFFFF, 0x01030607,
- 0xE18, 0xFFFFFFFF, 0x08090A0B,
- 0xE1C, 0xFFFFFFFF, 0x01030607,
- 0xE00, 0xFFFFFFFF, 0x00000000,
- 0xE04, 0xFFFFFFFF, 0x00000000,
- 0xE08, 0x0000FF00, 0x00000000,
- 0x86C, 0xFFFFFF00, 0x00000000,
- 0xE10, 0xFFFFFFFF, 0x00000000,
- 0xE14, 0xFFFFFFFF, 0x00000000,
- 0xE18, 0xFFFFFFFF, 0x00000000,
- 0xE1C, 0xFFFFFFFF, 0x00000000,
- 0xE00, 0xFFFFFFFF, 0x02020202,
- 0xE04, 0xFFFFFFFF, 0x00020202,
- 0xE08, 0x0000FF00, 0x00000000,
- 0x86C, 0xFFFFFF00, 0x00000000,
- 0xE10, 0xFFFFFFFF, 0x04040404,
- 0xE14, 0xFFFFFFFF, 0x00020404,
- 0xE18, 0xFFFFFFFF, 0x00000000,
- 0xE1C, 0xFFFFFFFF, 0x00000000,
- 0xE00, 0xFFFFFFFF, 0x02020202,
- 0xE04, 0xFFFFFFFF, 0x00020202,
- 0xE08, 0x0000FF00, 0x00000000,
- 0x86C, 0xFFFFFF00, 0x00000000,
- 0xE10, 0xFFFFFFFF, 0x04040404,
- 0xE14, 0xFFFFFFFF, 0x00020404,
- 0xE18, 0xFFFFFFFF, 0x00000000,
- 0xE1C, 0xFFFFFFFF, 0x00000000,
- 0xE00, 0xFFFFFFFF, 0x00000000,
- 0xE04, 0xFFFFFFFF, 0x00000000,
- 0xE08, 0x0000FF00, 0x00000000,
- 0x86C, 0xFFFFFF00, 0x00000000,
- 0xE10, 0xFFFFFFFF, 0x00000000,
- 0xE14, 0xFFFFFFFF, 0x00000000,
- 0xE18, 0xFFFFFFFF, 0x00000000,
- 0xE1C, 0xFFFFFFFF, 0x00000000,
- 0xE00, 0xFFFFFFFF, 0x02020202,
- 0xE04, 0xFFFFFFFF, 0x00020202,
- 0xE08, 0x0000FF00, 0x00000000,
- 0x86C, 0xFFFFFF00, 0x00000000,
- 0xE10, 0xFFFFFFFF, 0x04040404,
- 0xE14, 0xFFFFFFFF, 0x00020404,
- 0xE18, 0xFFFFFFFF, 0x00000000,
- 0xE1C, 0xFFFFFFFF, 0x00000000,
- 0xE00, 0xFFFFFFFF, 0x00000000,
- 0xE04, 0xFFFFFFFF, 0x00000000,
- 0xE08, 0x0000FF00, 0x00000000,
- 0x86C, 0xFFFFFF00, 0x00000000,
- 0xE10, 0xFFFFFFFF, 0x00000000,
- 0xE14, 0xFFFFFFFF, 0x00000000,
- 0xE18, 0xFFFFFFFF, 0x00000000,
- 0xE1C, 0xFFFFFFFF, 0x00000000,
- 0xE00, 0xFFFFFFFF, 0x00000000,
- 0xE04, 0xFFFFFFFF, 0x00000000,
- 0xE08, 0x0000FF00, 0x00000000,
- 0x86C, 0xFFFFFF00, 0x00000000,
- 0xE10, 0xFFFFFFFF, 0x00000000,
- 0xE14, 0xFFFFFFFF, 0x00000000,
- 0xE18, 0xFFFFFFFF, 0x00000000,
- 0xE1C, 0xFFFFFFFF, 0x00000000,
- 0xE00, 0xFFFFFFFF, 0x00000000,
- 0xE04, 0xFFFFFFFF, 0x00000000,
- 0xE08, 0x0000FF00, 0x00000000,
- 0x86C, 0xFFFFFF00, 0x00000000,
- 0xE10, 0xFFFFFFFF, 0x00000000,
- 0xE14, 0xFFFFFFFF, 0x00000000,
- 0xE18, 0xFFFFFFFF, 0x00000000,
- 0xE1C, 0xFFFFFFFF, 0x00000000,
- 0xE00, 0xFFFFFFFF, 0x00000000,
- 0xE04, 0xFFFFFFFF, 0x00000000,
- 0xE08, 0x0000FF00, 0x00000000,
- 0x86C, 0xFFFFFF00, 0x00000000,
- 0xE10, 0xFFFFFFFF, 0x00000000,
- 0xE14, 0xFFFFFFFF, 0x00000000,
- 0xE18, 0xFFFFFFFF, 0x00000000,
- 0xE1C, 0xFFFFFFFF, 0x00000000,
- 0xE00, 0xFFFFFFFF, 0x00000000,
- 0xE04, 0xFFFFFFFF, 0x00000000,
- 0xE08, 0x0000FF00, 0x00000000,
- 0x86C, 0xFFFFFF00, 0x00000000,
- 0xE10, 0xFFFFFFFF, 0x00000000,
- 0xE14, 0xFFFFFFFF, 0x00000000,
- 0xE18, 0xFFFFFFFF, 0x00000000,
- 0xE1C, 0xFFFFFFFF, 0x00000000,
-
-};
-
-static void odm_ConfigBB_PHY_REG_PG_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask,
- u32 Data)
-{
- if (Addr == 0xfe)
- msleep(50);
- else if (Addr == 0xfd)
- mdelay(5);
- else if (Addr == 0xfc)
- mdelay(1);
- else if (Addr == 0xfb)
- udelay(50);
- else if (Addr == 0xfa)
- udelay(5);
- else if (Addr == 0xf9)
- udelay(1);
- else
- storePwrIndexDiffRateOffset(pDM_Odm->Adapter, Addr, Bitmask, Data);
-}
-
-void ODM_ReadAndConfig_PHY_REG_PG_8188E(struct odm_dm_struct *dm_odm)
-{
- u32 hex;
- u32 i = 0;
- u32 arraylen = ARRAY_SIZE(array_phy_reg_pg_8188e);
- u32 *array = array_phy_reg_pg_8188e;
-
- hex = ODM_ITRF_USB << 8;
- hex += (ODM_CE << 16) + 0xFF000000;
-
- for (i = 0; i < arraylen; i += 3) {
- u32 v1 = array[i];
- u32 v2 = array[i + 1];
- u32 v3 = array[i + 2];
-
- /* this line is a line of pure_body */
- if (v1 < 0xCDCDCDCD) {
- odm_ConfigBB_PHY_REG_PG_8188E(dm_odm, v1, v2, v3);
- continue;
- } else { /* this line is the start of branch */
- if (!CheckCondition(array[i], hex)) {
- /* don't need the hw_body */
- i += 2; /* skip the pair of expression */
- v1 = array[i];
- v2 = array[i + 1];
- v3 = array[i + 2];
- while (v2 != 0xDEAD) {
- i += 3;
- v1 = array[i];
- v2 = array[i + 1];
- v3 = array[i + 1];
- }
- }
- }
- }
-}
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
deleted file mode 100644
index da71867bcca3..000000000000
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
+++ /dev/null
@@ -1,212 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#include "../include/rtw_iol.h"
-
-static bool Checkcondition(const u32 condition, const u32 hex)
-{
- u32 _board = (hex & 0x000000FF);
- u32 _interface = (hex & 0x0000FF00) >> 8;
- u32 _platform = (hex & 0x00FF0000) >> 16;
- u32 cond = condition;
-
- if (condition == 0xCDCDCDCD)
- return true;
-
- cond = condition & 0x000000FF;
- if ((_board == cond) && cond != 0x00)
- return false;
-
- cond = condition & 0x0000FF00;
- cond = cond >> 8;
- if ((_interface & cond) == 0 && cond != 0x07)
- return false;
-
- cond = condition & 0x00FF0000;
- cond = cond >> 16;
- if ((_platform & cond) == 0 && cond != 0x0F)
- return false;
- return true;
-}
-
-/******************************************************************************
-* MAC_REG.TXT
-******************************************************************************/
-
-static u32 array_MAC_REG_8188E[] = {
- 0x026, 0x00000041,
- 0x027, 0x00000035,
- 0x428, 0x0000000A,
- 0x429, 0x00000010,
- 0x430, 0x00000000,
- 0x431, 0x00000001,
- 0x432, 0x00000002,
- 0x433, 0x00000004,
- 0x434, 0x00000005,
- 0x435, 0x00000006,
- 0x436, 0x00000007,
- 0x437, 0x00000008,
- 0x438, 0x00000000,
- 0x439, 0x00000000,
- 0x43A, 0x00000001,
- 0x43B, 0x00000002,
- 0x43C, 0x00000004,
- 0x43D, 0x00000005,
- 0x43E, 0x00000006,
- 0x43F, 0x00000007,
- 0x440, 0x0000005D,
- 0x441, 0x00000001,
- 0x442, 0x00000000,
- 0x444, 0x00000015,
- 0x445, 0x000000F0,
- 0x446, 0x0000000F,
- 0x447, 0x00000000,
- 0x458, 0x00000041,
- 0x459, 0x000000A8,
- 0x45A, 0x00000072,
- 0x45B, 0x000000B9,
- 0x460, 0x00000066,
- 0x461, 0x00000066,
- 0x480, 0x00000008,
- 0x4C8, 0x000000FF,
- 0x4C9, 0x00000008,
- 0x4CC, 0x000000FF,
- 0x4CD, 0x000000FF,
- 0x4CE, 0x00000001,
- 0x4D3, 0x00000001,
- 0x500, 0x00000026,
- 0x501, 0x000000A2,
- 0x502, 0x0000002F,
- 0x503, 0x00000000,
- 0x504, 0x00000028,
- 0x505, 0x000000A3,
- 0x506, 0x0000005E,
- 0x507, 0x00000000,
- 0x508, 0x0000002B,
- 0x509, 0x000000A4,
- 0x50A, 0x0000005E,
- 0x50B, 0x00000000,
- 0x50C, 0x0000004F,
- 0x50D, 0x000000A4,
- 0x50E, 0x00000000,
- 0x50F, 0x00000000,
- 0x512, 0x0000001C,
- 0x514, 0x0000000A,
- 0x516, 0x0000000A,
- 0x525, 0x0000004F,
- 0x550, 0x00000010,
- 0x551, 0x00000010,
- 0x559, 0x00000002,
- 0x55D, 0x000000FF,
- 0x605, 0x00000030,
- 0x608, 0x0000000E,
- 0x609, 0x0000002A,
- 0x620, 0x000000FF,
- 0x621, 0x000000FF,
- 0x622, 0x000000FF,
- 0x623, 0x000000FF,
- 0x624, 0x000000FF,
- 0x625, 0x000000FF,
- 0x626, 0x000000FF,
- 0x627, 0x000000FF,
- 0x652, 0x00000020,
- 0x63C, 0x0000000A,
- 0x63D, 0x0000000A,
- 0x63E, 0x0000000E,
- 0x63F, 0x0000000E,
- 0x640, 0x00000040,
- 0x66E, 0x00000005,
- 0x700, 0x00000021,
- 0x701, 0x00000043,
- 0x702, 0x00000065,
- 0x703, 0x00000087,
- 0x708, 0x00000021,
- 0x709, 0x00000043,
- 0x70A, 0x00000065,
- 0x70B, 0x00000087,
-};
-
-static void odm_ConfigMAC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u8 Data)
-{
- rtw_write8(pDM_Odm->Adapter, Addr, Data);
-}
-
-int ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *dm_odm)
-{
- #define READ_NEXT_PAIR(v1, v2, i) do { i += 2; v1 = array[i]; v2 = array[i + 1]; } while (0)
-
- u32 hex = 0;
- u32 i;
- u32 array_len = ARRAY_SIZE(array_MAC_REG_8188E);
- u32 *array = array_MAC_REG_8188E;
- bool biol = false;
-
- struct adapter *adapt = dm_odm->Adapter;
- struct xmit_frame *pxmit_frame = NULL;
- u8 bndy_cnt = 1;
- hex += ODM_ITRF_USB << 8;
- hex += ODM_CE << 16;
- hex += 0xFF000000;
-
- biol = rtw_IOL_applied(adapt);
-
- if (biol) {
- pxmit_frame = rtw_IOL_accquire_xmit_frame(adapt);
- if (!pxmit_frame) {
- pr_info("rtw_IOL_accquire_xmit_frame failed\n");
- return -ENOMEM;
- }
- }
-
- for (i = 0; i < array_len; i += 2) {
- u32 v1 = array[i];
- u32 v2 = array[i + 1];
-
- /* This (offset, data) pair meets the condition. */
- if (v1 < 0xCDCDCDCD) {
- if (biol) {
- if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
- bndy_cnt++;
- rtw_IOL_append_WB_cmd(pxmit_frame, (u16)v1, (u8)v2, 0xFF);
- } else {
- odm_ConfigMAC_8188E(dm_odm, v1, (u8)v2);
- }
- continue;
- } else { /* This line is the start line of branch. */
- if (!Checkcondition(array[i], hex)) {
- /* Discard the following (offset, data) pairs. */
- READ_NEXT_PAIR(v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < array_len - 2) {
- READ_NEXT_PAIR(v1, v2, i);
- }
- i -= 2; /* prevent from for-loop += 2 */
- } else { /* Configure matched pairs and skip to end of if-else. */
- READ_NEXT_PAIR(v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < array_len - 2) {
- if (biol) {
- if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
- bndy_cnt++;
- rtw_IOL_append_WB_cmd(pxmit_frame, (u16)v1, (u8)v2, 0xFF);
- } else {
- odm_ConfigMAC_8188E(dm_odm, v1, (u8)v2);
- }
-
- READ_NEXT_PAIR(v1, v2, i);
- }
- while (v2 != 0xDEAD && i < array_len - 2)
- READ_NEXT_PAIR(v1, v2, i);
- }
- }
- }
- if (biol) {
- if (!rtl8188e_IOL_exec_cmds_sync(dm_odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
- pr_info("~~~ MAC IOL_exec_cmds Failed !!!\n");
- return -1;
- }
- }
- return 0;
-}
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
deleted file mode 100644
index a4c3d3d149f7..000000000000
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
+++ /dev/null
@@ -1,269 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#include "../include/rtw_iol.h"
-
-static bool CheckCondition(const u32 Condition, const u32 Hex)
-{
- u32 _interface = (Hex & 0x0000FF00) >> 8;
- u32 _platform = (Hex & 0x00FF0000) >> 16;
- u32 cond = Condition;
-
- if (Condition == 0xCDCDCDCD)
- return true;
-
- cond = Condition & 0x0000FF00;
- cond = cond >> 8;
- if ((_interface & cond) == 0 && cond != 0x07)
- return false;
-
- cond = Condition & 0x00FF0000;
- cond = cond >> 16;
- if ((_platform & cond) == 0 && cond != 0x0F)
- return false;
- return true;
-}
-
-/******************************************************************************
-* RadioA_1T.TXT
-******************************************************************************/
-
-static u32 Array_RadioA_1T_8188E[] = {
- 0x000, 0x00030000,
- 0x008, 0x00084000,
- 0x018, 0x00000407,
- 0x019, 0x00000012,
- 0x01E, 0x00080009,
- 0x01F, 0x00000880,
- 0x02F, 0x0001A060,
- 0x03F, 0x00000000,
- 0x042, 0x000060C0,
- 0x057, 0x000D0000,
- 0x058, 0x000BE180,
- 0x067, 0x00001552,
- 0x083, 0x00000000,
- 0x0B0, 0x000FF8FC,
- 0x0B1, 0x00054400,
- 0x0B2, 0x000CCC19,
- 0x0B4, 0x00043003,
- 0x0B6, 0x0004953E,
- 0x0B7, 0x0001C718,
- 0x0B8, 0x000060FF,
- 0x0B9, 0x00080001,
- 0x0BA, 0x00040000,
- 0x0BB, 0x00000400,
- 0x0BF, 0x000C0000,
- 0x0C2, 0x00002400,
- 0x0C3, 0x00000009,
- 0x0C4, 0x00040C91,
- 0x0C5, 0x00099999,
- 0x0C6, 0x000000A3,
- 0x0C7, 0x00088820,
- 0x0C8, 0x00076C06,
- 0x0C9, 0x00000000,
- 0x0CA, 0x00080000,
- 0x0DF, 0x00000180,
- 0x0EF, 0x000001A0,
- 0x051, 0x0006B27D,
- 0xFF0F041F, 0xABCD,
- 0x052, 0x0007E4DD,
- 0xCDCDCDCD, 0xCDCD,
- 0x052, 0x0007E49D,
- 0xFF0F041F, 0xDEAD,
- 0x053, 0x00000073,
- 0x056, 0x00051FF3,
- 0x035, 0x00000086,
- 0x035, 0x00000186,
- 0x035, 0x00000286,
- 0x036, 0x00001C25,
- 0x036, 0x00009C25,
- 0x036, 0x00011C25,
- 0x036, 0x00019C25,
- 0x0B6, 0x00048538,
- 0x018, 0x00000C07,
- 0x05A, 0x0004BD00,
- 0x019, 0x000739D0,
- 0x034, 0x0000ADF3,
- 0x034, 0x00009DF0,
- 0x034, 0x00008DED,
- 0x034, 0x00007DEA,
- 0x034, 0x00006DE7,
- 0x034, 0x000054EE,
- 0x034, 0x000044EB,
- 0x034, 0x000034E8,
- 0x034, 0x0000246B,
- 0x034, 0x00001468,
- 0x034, 0x0000006D,
- 0x000, 0x00030159,
- 0x084, 0x00068200,
- 0x086, 0x000000CE,
- 0x087, 0x00048A00,
- 0x08E, 0x00065540,
- 0x08F, 0x00088000,
- 0x0EF, 0x000020A0,
- 0x03B, 0x000F02B0,
- 0x03B, 0x000EF7B0,
- 0x03B, 0x000D4FB0,
- 0x03B, 0x000CF060,
- 0x03B, 0x000B0090,
- 0x03B, 0x000A0080,
- 0x03B, 0x00090080,
- 0x03B, 0x0008F780,
- 0x03B, 0x000722B0,
- 0x03B, 0x0006F7B0,
- 0x03B, 0x00054FB0,
- 0x03B, 0x0004F060,
- 0x03B, 0x00030090,
- 0x03B, 0x00020080,
- 0x03B, 0x00010080,
- 0x03B, 0x0000F780,
- 0x0EF, 0x000000A0,
- 0x000, 0x00010159,
- 0x018, 0x0000F407,
- 0xFFE, 0x00000000,
- 0xFFE, 0x00000000,
- 0x01F, 0x00080003,
- 0xFFE, 0x00000000,
- 0xFFE, 0x00000000,
- 0x01E, 0x00000001,
- 0x01F, 0x00080000,
- 0x000, 0x00033E60,
-};
-
-static void odm_ConfigRFReg_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
- u32 Data, u32 RegAddr)
-{
- if (Addr == 0xffe) {
- msleep(50);
- } else if (Addr == 0xfd) {
- mdelay(5);
- } else if (Addr == 0xfc) {
- mdelay(1);
- } else if (Addr == 0xfb) {
- udelay(50);
- } else if (Addr == 0xfa) {
- udelay(5);
- } else if (Addr == 0xf9) {
- udelay(1);
- } else {
- rtl8188e_PHY_SetRFReg(pDM_Odm->Adapter, RegAddr, bRFRegOffsetMask, Data);
- /* Add 1us delay between BB/RF register setting. */
- udelay(1);
- }
-}
-
-static void odm_ConfigRF_RadioA_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data)
-{
- u32 content = 0x1000; /* RF_Content: radioa_txt */
- u32 maskforPhySet = (u32)(content & 0xE000);
-
- odm_ConfigRFReg_8188E(pDM_Odm, Addr, Data, Addr | maskforPhySet);
-}
-
-int ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
-{
- #define READ_NEXT_PAIR(v1, v2, i) do \
- { i += 2; v1 = Array[i]; \
- v2 = Array[i + 1]; } while (0)
-
- u32 hex = 0;
- u32 i = 0;
- u32 ArrayLen = ARRAY_SIZE(Array_RadioA_1T_8188E);
- u32 *Array = Array_RadioA_1T_8188E;
- bool biol = false;
- struct adapter *Adapter = pDM_Odm->Adapter;
- struct xmit_frame *pxmit_frame = NULL;
- u8 bndy_cnt = 1;
-
- hex += ODM_ITRF_USB << 8;
- hex += ODM_CE << 16;
- hex += 0xFF000000;
- biol = rtw_IOL_applied(Adapter);
-
- if (biol) {
- pxmit_frame = rtw_IOL_accquire_xmit_frame(Adapter);
- if (!pxmit_frame) {
- pr_info("rtw_IOL_accquire_xmit_frame failed\n");
- return -ENOMEM;
- }
- }
-
- for (i = 0; i < ArrayLen; i += 2) {
- u32 v1 = Array[i];
- u32 v2 = Array[i + 1];
-
- /* This (offset, data) pair meets the condition. */
- if (v1 < 0xCDCDCDCD) {
- if (biol) {
- if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
- bndy_cnt++;
-
- if (v1 == 0xffe)
- rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 50);
- else if (v1 == 0xfd)
- rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 5);
- else if (v1 == 0xfc)
- rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 1);
- else if (v1 == 0xfb)
- rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 50);
- else if (v1 == 0xfa)
- rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 5);
- else if (v1 == 0xf9)
- rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 1);
- else
- rtw_IOL_append_WRF_cmd(pxmit_frame, RF_PATH_A, (u16)v1, v2, bRFRegOffsetMask);
- } else {
- odm_ConfigRF_RadioA_8188E(pDM_Odm, v1, v2);
- }
- continue;
- } else { /* This line is the start line of branch. */
- if (!CheckCondition(Array[i], hex)) {
- /* Discard the following (offset, data) pairs. */
- READ_NEXT_PAIR(v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < ArrayLen - 2)
- READ_NEXT_PAIR(v1, v2, i);
- i -= 2; /* prevent from for-loop += 2 */
- } else { /* Configure matched pairs and skip to end of if-else. */
- READ_NEXT_PAIR(v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < ArrayLen - 2) {
- if (biol) {
- if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
- bndy_cnt++;
-
- if (v1 == 0xffe)
- rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 50);
- else if (v1 == 0xfd)
- rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 5);
- else if (v1 == 0xfc)
- rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 1);
- else if (v1 == 0xfb)
- rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 50);
- else if (v1 == 0xfa)
- rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 5);
- else if (v1 == 0xf9)
- rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 1);
- else
- rtw_IOL_append_WRF_cmd(pxmit_frame, RF_PATH_A, (u16)v1, v2, bRFRegOffsetMask);
- } else {
- odm_ConfigRF_RadioA_8188E(pDM_Odm, v1, v2);
- }
- READ_NEXT_PAIR(v1, v2, i);
- }
-
- while (v2 != 0xDEAD && i < ArrayLen - 2)
- READ_NEXT_PAIR(v1, v2, i);
- }
- }
- }
- if (biol) {
- if (!rtl8188e_IOL_exec_cmds_sync(pDM_Odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
- pr_info("~~~ IOL Config %s Failed !!!\n", __func__);
- return -1;
- }
- }
- return 0;
-}
diff --git a/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c b/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c
deleted file mode 100644
index 26e710ef5134..000000000000
--- a/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c
+++ /dev/null
@@ -1,900 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#include "../include/drv_types.h"
-
-/*---------------------------Define Local Constant---------------------------*/
-/* 2010/04/25 MH Define the max tx power tracking tx agc power. */
-#define ODM_TXPWRTRACK_MAX_IDX_88E 6
-
-/*---------------------------Define Local Constant---------------------------*/
-
-/* 3============================================================ */
-/* 3 Tx Power Tracking */
-/* 3============================================================ */
-/*-----------------------------------------------------------------------------
- * Function: ODM_TxPwrTrackAdjust88E()
- *
- * Overview:	On 88E we cannot write 0xc80/c94/c4c/0xa2x directly; write the TX AGC instead.
- *		OFDM and CCK both use the same method.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 04/23/2012 MHC Create Version 0.
- * 04/23/2012	MHC		Adjust TX agc directly, not through BB digital.
- *
- *---------------------------------------------------------------------------*/
-void ODM_TxPwrTrackAdjust88E(struct odm_dm_struct *dm_odm, u8 Type,/* 0 = OFDM, 1 = CCK */
- u8 *pDirection, /* 1 = +(increase) 2 = -(decrease) */
- u32 *pOutWriteVal /* Tx tracking CCK/OFDM BB swing index adjust */
- )
-{
- u8 pwr_value = 0;
- /* Tx power tracking BB swing table. */
- /* The base index = 12. +((12-n)/2)dB 13~?? = decrease tx pwr by -((n-12)/2)dB */
-	if (Type == 0) { /* For OFDM adjust */
- if (dm_odm->BbSwingIdxOfdm <= dm_odm->BbSwingIdxOfdmBase) {
- *pDirection = 1;
- pwr_value = (dm_odm->BbSwingIdxOfdmBase - dm_odm->BbSwingIdxOfdm);
- } else {
- *pDirection = 2;
- pwr_value = (dm_odm->BbSwingIdxOfdm - dm_odm->BbSwingIdxOfdmBase);
- }
- } else if (Type == 1) { /* For CCK adjust. */
- if (dm_odm->BbSwingIdxCck <= dm_odm->BbSwingIdxCckBase) {
- *pDirection = 1;
- pwr_value = (dm_odm->BbSwingIdxCckBase - dm_odm->BbSwingIdxCck);
- } else {
- *pDirection = 2;
- pwr_value = (dm_odm->BbSwingIdxCck - dm_odm->BbSwingIdxCckBase);
- }
- }
-
- /* */
- /* 2012/04/25 MH According to Ed/Luke.Lees estimate for EVM the max tx power tracking */
- /* need to be less than 6 power index for 88E. */
- /* */
- if (pwr_value >= ODM_TXPWRTRACK_MAX_IDX_88E && *pDirection == 1)
- pwr_value = ODM_TXPWRTRACK_MAX_IDX_88E;
-
- *pOutWriteVal = pwr_value | (pwr_value << 8) | (pwr_value << 16) | (pwr_value << 24);
-} /* ODM_TxPwrTrackAdjust88E */
-
-/*-----------------------------------------------------------------------------
- * Function: odm_TxPwrTrackSetPwr88E()
- *
- * Overview:	88E changes the tx power of all channels according to the flag.
- *		OFDM and CCK are handled differently.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 04/23/2012 MHC Create Version 0.
- *
- *---------------------------------------------------------------------------*/
-static void odm_TxPwrTrackSetPwr88E(struct odm_dm_struct *dm_odm)
-{
- if (dm_odm->BbSwingFlagOfdm || dm_odm->BbSwingFlagCck) {
- PHY_SetTxPowerLevel8188E(dm_odm->Adapter, *dm_odm->pChannel);
- dm_odm->BbSwingFlagOfdm = false;
- dm_odm->BbSwingFlagCck = false;
- }
-} /* odm_TxPwrTrackSetPwr88E */
-
-/* 091212 chiyokolin */
-void
-odm_TXPowerTrackingCallback_ThermalMeter_8188E(
- struct adapter *Adapter
- )
-{
- struct hal_data_8188e *pHalData = &Adapter->haldata;
- u8 ThermalValue = 0, delta, delta_LCK, delta_IQK, offset;
- u8 ThermalValue_AVG_count = 0;
- u32 ThermalValue_AVG = 0;
- s32 ele_D, TempCCk;
- s8 OFDM_index, CCK_index = 0;
- s8 OFDM_index_old = 0, CCK_index_old = 0;
- u32 i = 0, j = 0;
-
- u8 OFDM_min_index = 6; /* OFDM BB Swing should be less than +3.0dB, which is required by Arthur */
- s8 OFDM_index_mapping[2][index_mapping_NUM_88E] = {
- {0, 0, 2, 3, 4, 4, /* 2.4G, decrease power */
- 5, 6, 7, 7, 8, 9,
-		10, 10, 11}, /* For lower temperature, updated on 20120220. */
- {0, 0, -1, -2, -3, -4, /* 2.4G, increase power */
- -4, -4, -4, -5, -7, -8,
- -9, -9, -10},
- };
- u8 Thermal_mapping[2][index_mapping_NUM_88E] = {
- {0, 2, 4, 6, 8, 10, /* 2.4G, decrease power */
- 12, 14, 16, 18, 20, 22,
- 24, 26, 27},
-		{0, 2, 4, 6, 8, 10, /* 2.4G, increase power */
- 12, 14, 16, 18, 20, 22,
- 25, 25, 25},
- };
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
-
- /* 2012/04/25 MH Add for tx power tracking to set tx power in tx agc for 88E. */
- odm_TxPwrTrackSetPwr88E(dm_odm);
-
- /* <Kordan> RFCalibrateInfo.RegA24 will be initialized when ODM HW configuring, but MP configures with para files. */
- dm_odm->RFCalibrateInfo.RegA24 = 0x090e1317;
-
- ThermalValue = (u8)rtl8188e_PHY_QueryRFReg(Adapter, RF_T_METER_88E, 0xfc00); /* 0x42: RF Reg[15:10] 88E */
-
- if (ThermalValue) {
- /* Query OFDM path A default setting */
- ele_D = rtl8188e_PHY_QueryBBReg(Adapter, rOFDM0_XATxIQImbalance, bMaskDWord) & bMaskOFDM_D;
- for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { /* find the index */
- if (ele_D == (OFDMSwingTable[i] & bMaskOFDM_D)) {
- OFDM_index_old = (u8)i;
- dm_odm->BbSwingIdxOfdmBase = (u8)i;
- break;
- }
- }
-
- /* Query CCK default setting From 0xa24 */
- TempCCk = dm_odm->RFCalibrateInfo.RegA24;
-
- for (i = 0; i < CCK_TABLE_SIZE; i++) {
- if (memcmp((void *)&TempCCk, (void *)&cck_swing_table[i][2], 4)) {
- CCK_index_old = (u8)i;
- dm_odm->BbSwingIdxCckBase = (u8)i;
- break;
- }
- }
-
- if (!dm_odm->RFCalibrateInfo.ThermalValue) {
- dm_odm->RFCalibrateInfo.ThermalValue = pHalData->EEPROMThermalMeter;
- dm_odm->RFCalibrateInfo.ThermalValue_LCK = ThermalValue;
- dm_odm->RFCalibrateInfo.ThermalValue_IQK = ThermalValue;
-
- dm_odm->RFCalibrateInfo.OFDM_index = OFDM_index_old;
- dm_odm->RFCalibrateInfo.CCK_index = CCK_index_old;
- }
-
- /* calculate average thermal meter */
- dm_odm->RFCalibrateInfo.ThermalValue_AVG[dm_odm->RFCalibrateInfo.ThermalValue_AVG_index] = ThermalValue;
- dm_odm->RFCalibrateInfo.ThermalValue_AVG_index++;
- if (dm_odm->RFCalibrateInfo.ThermalValue_AVG_index == AVG_THERMAL_NUM_88E)
- dm_odm->RFCalibrateInfo.ThermalValue_AVG_index = 0;
-
- for (i = 0; i < AVG_THERMAL_NUM_88E; i++) {
- if (dm_odm->RFCalibrateInfo.ThermalValue_AVG[i]) {
- ThermalValue_AVG += dm_odm->RFCalibrateInfo.ThermalValue_AVG[i];
- ThermalValue_AVG_count++;
- }
- }
-
- if (ThermalValue_AVG_count)
- ThermalValue = (u8)(ThermalValue_AVG / ThermalValue_AVG_count);
-
- if (dm_odm->RFCalibrateInfo.bReloadtxpowerindex) {
- delta = ThermalValue > pHalData->EEPROMThermalMeter ?
- (ThermalValue - pHalData->EEPROMThermalMeter) :
- (pHalData->EEPROMThermalMeter - ThermalValue);
- dm_odm->RFCalibrateInfo.bReloadtxpowerindex = false;
- dm_odm->RFCalibrateInfo.bDoneTxpower = false;
- } else if (dm_odm->RFCalibrateInfo.bDoneTxpower) {
- delta = (ThermalValue > dm_odm->RFCalibrateInfo.ThermalValue) ?
- (ThermalValue - dm_odm->RFCalibrateInfo.ThermalValue) :
- (dm_odm->RFCalibrateInfo.ThermalValue - ThermalValue);
- } else {
- delta = ThermalValue > pHalData->EEPROMThermalMeter ?
- (ThermalValue - pHalData->EEPROMThermalMeter) :
- (pHalData->EEPROMThermalMeter - ThermalValue);
- }
- delta_LCK = (ThermalValue > dm_odm->RFCalibrateInfo.ThermalValue_LCK) ?
- (ThermalValue - dm_odm->RFCalibrateInfo.ThermalValue_LCK) :
- (dm_odm->RFCalibrateInfo.ThermalValue_LCK - ThermalValue);
- delta_IQK = (ThermalValue > dm_odm->RFCalibrateInfo.ThermalValue_IQK) ?
- (ThermalValue - dm_odm->RFCalibrateInfo.ThermalValue_IQK) :
- (dm_odm->RFCalibrateInfo.ThermalValue_IQK - ThermalValue);
-
- if ((delta_LCK >= 8)) { /* Delta temperature is equal to or larger than 20 centigrade. */
- dm_odm->RFCalibrateInfo.ThermalValue_LCK = ThermalValue;
- PHY_LCCalibrate_8188E(Adapter);
- }
-
- if (delta > 0 && dm_odm->RFCalibrateInfo.TxPowerTrackControl) {
- delta = ThermalValue > pHalData->EEPROMThermalMeter ?
- (ThermalValue - pHalData->EEPROMThermalMeter) :
- (pHalData->EEPROMThermalMeter - ThermalValue);
- /* calculate new OFDM / CCK offset */
- if (ThermalValue > pHalData->EEPROMThermalMeter)
- j = 1;
- else
- j = 0;
- for (offset = 0; offset < index_mapping_NUM_88E; offset++) {
- if (delta < Thermal_mapping[j][offset]) {
- if (offset != 0)
- offset--;
- break;
- }
- }
- if (offset >= index_mapping_NUM_88E)
- offset = index_mapping_NUM_88E - 1;
- OFDM_index = dm_odm->RFCalibrateInfo.OFDM_index + OFDM_index_mapping[j][offset];
- CCK_index = dm_odm->RFCalibrateInfo.CCK_index + OFDM_index_mapping[j][offset];
-
- if (OFDM_index > OFDM_TABLE_SIZE_92D - 1)
- OFDM_index = OFDM_TABLE_SIZE_92D - 1;
- else if (OFDM_index < OFDM_min_index)
- OFDM_index = OFDM_min_index;
-
- if (CCK_index > CCK_TABLE_SIZE - 1)
- CCK_index = CCK_TABLE_SIZE - 1;
- else if (CCK_index < 0)
- CCK_index = 0;
-
- /* 2 temporarily remove bNOPG */
- /* Config by SwingTable */
- if (dm_odm->RFCalibrateInfo.TxPowerTrackControl) {
- dm_odm->RFCalibrateInfo.bDoneTxpower = true;
-
-				/* Revise TX power table. */
- dm_odm->BbSwingIdxOfdm = (u8)OFDM_index;
- dm_odm->BbSwingIdxCck = (u8)CCK_index;
-
- if (dm_odm->BbSwingIdxOfdmCurrent != dm_odm->BbSwingIdxOfdm) {
- dm_odm->BbSwingIdxOfdmCurrent = dm_odm->BbSwingIdxOfdm;
- dm_odm->BbSwingFlagOfdm = true;
- }
-
- if (dm_odm->BbSwingIdxCckCurrent != dm_odm->BbSwingIdxCck) {
- dm_odm->BbSwingIdxCckCurrent = dm_odm->BbSwingIdxCck;
- dm_odm->BbSwingFlagCck = true;
- }
- }
- }
-
- if (delta_IQK >= 8) { /* Delta temperature is equal to or larger than 20 centigrade. */
- dm_odm->RFCalibrateInfo.ThermalValue_IQK = ThermalValue;
- PHY_IQCalibrate_8188E(Adapter, false);
- }
- /* update thermal meter value */
- if (dm_odm->RFCalibrateInfo.TxPowerTrackControl)
- dm_odm->RFCalibrateInfo.ThermalValue = ThermalValue;
- }
-}
-
-/* 1 7. IQK */
-#define MAX_TOLERANCE 5
-#define IQK_DELAY_TIME 1 /* ms */
-
-static u8 /* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
-phy_PathA_IQK_8188E(struct adapter *adapt)
-{
- u32 regeac, regE94, regE9C;
- u8 result = 0x00;
-
- /* 1 Tx IQK */
- /* path-A IQK setting */
- rtl8188e_PHY_SetBBReg(adapt, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c);
- rtl8188e_PHY_SetBBReg(adapt, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c);
- rtl8188e_PHY_SetBBReg(adapt, rTx_IQK_PI_A, bMaskDWord, 0x8214032a);
- rtl8188e_PHY_SetBBReg(adapt, rRx_IQK_PI_A, bMaskDWord, 0x28160000);
-
- /* LO calibration setting */
- rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Rsp, bMaskDWord, 0x00462911);
-
- /* One shot, path A LOK & IQK */
- rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
- rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
-
- /* delay x ms */
- /* PlatformStallExecution(IQK_DELAY_TIME_88E*1000); */
- mdelay(IQK_DELAY_TIME_88E);
-
- /* Check failed */
- regeac = rtl8188e_PHY_QueryBBReg(adapt, rRx_Power_After_IQK_A_2, bMaskDWord);
- regE94 = rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_A, bMaskDWord);
- regE9C = rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_After_IQK_A, bMaskDWord);
-
- if (!(regeac & BIT(28)) &&
- (((regE94 & 0x03FF0000) >> 16) != 0x142) &&
- (((regE9C & 0x03FF0000) >> 16) != 0x42))
- result |= 0x01;
- return result;
-}
-
-static u8 /* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
-phy_PathA_RxIQK(struct adapter *adapt)
-{
- u32 regeac, regE94, regE9C, regEA4, u4tmp;
- u8 result = 0x00;
-
- /* 1 Get TXIMR setting */
- /* modify RXIQK mode table */
- rtl8188e_PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x00000000);
- rtl8188e_PHY_SetRFReg(adapt, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
- rtl8188e_PHY_SetRFReg(adapt, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
- rtl8188e_PHY_SetRFReg(adapt, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f);
- rtl8188e_PHY_SetRFReg(adapt, RF_TXPA_G2, bRFRegOffsetMask, 0xf117B);
-
- /* PA,PAD off */
- rtl8188e_PHY_SetRFReg(adapt, 0xdf, bRFRegOffsetMask, 0x980);
- rtl8188e_PHY_SetRFReg(adapt, 0x56, bRFRegOffsetMask, 0x51000);
-
- rtl8188e_PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x80800000);
-
- /* IQK setting */
- rtl8188e_PHY_SetBBReg(adapt, rTx_IQK, bMaskDWord, 0x01007c00);
- rtl8188e_PHY_SetBBReg(adapt, rRx_IQK, bMaskDWord, 0x81004800);
-
- /* path-A IQK setting */
- rtl8188e_PHY_SetBBReg(adapt, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c);
- rtl8188e_PHY_SetBBReg(adapt, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c);
- rtl8188e_PHY_SetBBReg(adapt, rTx_IQK_PI_A, bMaskDWord, 0x82160c1f);
- rtl8188e_PHY_SetBBReg(adapt, rRx_IQK_PI_A, bMaskDWord, 0x28160000);
-
- /* LO calibration setting */
- rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
-
- /* One shot, path A LOK & IQK */
- rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
- rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
-
- /* delay x ms */
- mdelay(IQK_DELAY_TIME_88E);
-
- /* Check failed */
- regeac = rtl8188e_PHY_QueryBBReg(adapt, rRx_Power_After_IQK_A_2, bMaskDWord);
- regE94 = rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_A, bMaskDWord);
- regE9C = rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_After_IQK_A, bMaskDWord);
-
- if (!(regeac & BIT(28)) &&
- (((regE94 & 0x03FF0000) >> 16) != 0x142) &&
- (((regE9C & 0x03FF0000) >> 16) != 0x42))
- result |= 0x01;
- else /* if Tx not OK, ignore Rx */
- return result;
-
- u4tmp = 0x80007C00 | (regE94 & 0x3FF0000) | ((regE9C & 0x3FF0000) >> 16);
- rtl8188e_PHY_SetBBReg(adapt, rTx_IQK, bMaskDWord, u4tmp);
-
- /* 1 RX IQK */
- /* modify RXIQK mode table */
- rtl8188e_PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x00000000);
- rtl8188e_PHY_SetRFReg(adapt, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
- rtl8188e_PHY_SetRFReg(adapt, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
- rtl8188e_PHY_SetRFReg(adapt, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f);
- rtl8188e_PHY_SetRFReg(adapt, RF_TXPA_G2, bRFRegOffsetMask, 0xf7ffa);
- rtl8188e_PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x80800000);
-
- /* IQK setting */
- rtl8188e_PHY_SetBBReg(adapt, rRx_IQK, bMaskDWord, 0x01004800);
-
- /* path-A IQK setting */
- rtl8188e_PHY_SetBBReg(adapt, rTx_IQK_Tone_A, bMaskDWord, 0x38008c1c);
- rtl8188e_PHY_SetBBReg(adapt, rRx_IQK_Tone_A, bMaskDWord, 0x18008c1c);
- rtl8188e_PHY_SetBBReg(adapt, rTx_IQK_PI_A, bMaskDWord, 0x82160c05);
- rtl8188e_PHY_SetBBReg(adapt, rRx_IQK_PI_A, bMaskDWord, 0x28160c1f);
-
- /* LO calibration setting */
- rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
-
- /* One shot, path A LOK & IQK */
- rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
- rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
-
- /* delay x ms */
- /* PlatformStallExecution(IQK_DELAY_TIME_88E*1000); */
- mdelay(IQK_DELAY_TIME_88E);
-
- /* Check failed */
- regeac = rtl8188e_PHY_QueryBBReg(adapt, rRx_Power_After_IQK_A_2, bMaskDWord);
- regE94 = rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_A, bMaskDWord);
- regE9C = rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_After_IQK_A, bMaskDWord);
- regEA4 = rtl8188e_PHY_QueryBBReg(adapt, rRx_Power_Before_IQK_A_2, bMaskDWord);
-
- /* reload RF 0xdf */
- rtl8188e_PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x00000000);
- rtl8188e_PHY_SetRFReg(adapt, 0xdf, bRFRegOffsetMask, 0x180);
-
- if (!(regeac & BIT(27)) && /* if Tx is OK, check whether Rx is OK */
- (((regEA4 & 0x03FF0000) >> 16) != 0x132) &&
- (((regeac & 0x03FF0000) >> 16) != 0x36))
- result |= 0x02;
-
- return result;
-}
-
-static void patha_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], u8 final_candidate, bool txonly)
-{
- u32 Oldval_0, X, TX0_A, reg;
- s32 Y, TX0_C;
-
- if (final_candidate == 0xFF) {
- return;
- } else if (iqkok) {
- Oldval_0 = (rtl8188e_PHY_QueryBBReg(adapt, rOFDM0_XATxIQImbalance, bMaskDWord) >> 22) & 0x3FF;
-
- X = result[final_candidate][0];
- if ((X & 0x00000200) != 0)
- X = X | 0xFFFFFC00;
- TX0_A = (X * Oldval_0) >> 8;
- rtl8188e_PHY_SetBBReg(adapt, rOFDM0_XATxIQImbalance, 0x3FF, TX0_A);
-
- rtl8188e_PHY_SetBBReg(adapt, rOFDM0_ECCAThreshold, BIT(31), ((X * Oldval_0 >> 7) & 0x1));
-
- Y = result[final_candidate][1];
- if ((Y & 0x00000200) != 0)
- Y = Y | 0xFFFFFC00;
-
- TX0_C = (Y * Oldval_0) >> 8;
- rtl8188e_PHY_SetBBReg(adapt, rOFDM0_XCTxAFE, 0xF0000000, ((TX0_C & 0x3C0) >> 6));
- rtl8188e_PHY_SetBBReg(adapt, rOFDM0_XATxIQImbalance, 0x003F0000, (TX0_C & 0x3F));
-
- rtl8188e_PHY_SetBBReg(adapt, rOFDM0_ECCAThreshold, BIT(29), ((Y * Oldval_0 >> 7) & 0x1));
-
- if (txonly)
- return;
-
- reg = result[final_candidate][2];
- rtl8188e_PHY_SetBBReg(adapt, rOFDM0_XARxIQImbalance, 0x3FF, reg);
-
- reg = result[final_candidate][3] & 0x3F;
- rtl8188e_PHY_SetBBReg(adapt, rOFDM0_XARxIQImbalance, 0xFC00, reg);
-
- reg = (result[final_candidate][3] >> 6) & 0xF;
- rtl8188e_PHY_SetBBReg(adapt, rOFDM0_RxIQExtAnta, 0xF0000000, reg);
- }
-}
-
-void _PHY_SaveADDARegisters(struct adapter *adapt, u32 *ADDAReg, u32 *ADDABackup, u32 RegisterNum)
-{
- u32 i;
-
- for (i = 0; i < RegisterNum; i++) {
- ADDABackup[i] = rtl8188e_PHY_QueryBBReg(adapt, ADDAReg[i], bMaskDWord);
- }
-}
-
-/* FIXME: return an error to caller */
-static void _PHY_SaveMACRegisters(
- struct adapter *adapt,
- u32 *MACReg,
- u32 *MACBackup
- )
-{
- u32 i;
- int res;
-
- for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) {
- u8 reg;
-
- res = rtw_read8(adapt, MACReg[i], &reg);
- if (res)
- return;
-
- MACBackup[i] = reg;
- }
-
- res = rtw_read32(adapt, MACReg[i], MACBackup + i);
- (void)res;
-}
-
-static void reload_adda_reg(struct adapter *adapt, u32 *ADDAReg, u32 *ADDABackup, u32 RegiesterNum)
-{
- u32 i;
-
- for (i = 0; i < RegiesterNum; i++)
- rtl8188e_PHY_SetBBReg(adapt, ADDAReg[i], bMaskDWord, ADDABackup[i]);
-}
-
-static void
-_PHY_ReloadMACRegisters(
- struct adapter *adapt,
- u32 *MACReg,
- u32 *MACBackup
- )
-{
- u32 i;
-
- for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
- rtw_write8(adapt, MACReg[i], (u8)MACBackup[i]);
-
- rtw_write32(adapt, MACReg[i], MACBackup[i]);
-}
-
-static void
-_PHY_PathADDAOn(
- struct adapter *adapt,
- u32 *ADDAReg)
-{
- u32 i;
-
- rtl8188e_PHY_SetBBReg(adapt, ADDAReg[0], bMaskDWord, 0x0b1b25a0);
-
- for (i = 1; i < IQK_ADDA_REG_NUM; i++)
- rtl8188e_PHY_SetBBReg(adapt, ADDAReg[i], bMaskDWord, 0x0bdb25a0);
-}
-
-void
-_PHY_MACSettingCalibration(
- struct adapter *adapt,
- u32 *MACReg,
- u32 *MACBackup
- )
-{
- u32 i = 0;
-
- rtw_write8(adapt, MACReg[i], 0x3F);
-
- for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
- rtw_write8(adapt, MACReg[i], (u8)(MACBackup[i] & (~BIT(3))));
-
- rtw_write8(adapt, MACReg[i], (u8)(MACBackup[i] & (~BIT(5))));
-}
-
-static void _PHY_PIModeSwitch(
- struct adapter *adapt,
- bool PIMode
- )
-{
- u32 mode;
-
- mode = PIMode ? 0x01000100 : 0x01000000;
- rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XA_HSSIParameter1, bMaskDWord, mode);
- rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XB_HSSIParameter1, bMaskDWord, mode);
-}
-
-static bool phy_SimularityCompare_8188E(
- struct adapter *adapt,
- s32 resulta[][8],
- u8 c1,
- u8 c2
- )
-{
- u32 i, j, diff, sim_bitmap, bound = 0;
- u8 final_candidate[2] = {0xFF, 0xFF}; /* for path A and path B */
- bool result = true;
- s32 tmp1 = 0, tmp2 = 0;
-
- bound = 4;
- sim_bitmap = 0;
-
- for (i = 0; i < bound; i++) {
- if ((i == 1) || (i == 3) || (i == 5) || (i == 7)) {
- if ((resulta[c1][i] & 0x00000200) != 0)
- tmp1 = resulta[c1][i] | 0xFFFFFC00;
- else
- tmp1 = resulta[c1][i];
-
- if ((resulta[c2][i] & 0x00000200) != 0)
- tmp2 = resulta[c2][i] | 0xFFFFFC00;
- else
- tmp2 = resulta[c2][i];
- } else {
- tmp1 = resulta[c1][i];
- tmp2 = resulta[c2][i];
- }
-
- diff = abs(tmp1 - tmp2);
-
- if (diff > MAX_TOLERANCE) {
- if ((i == 2 || i == 6) && !sim_bitmap) {
- if (resulta[c1][i] + resulta[c1][i + 1] == 0)
- final_candidate[(i / 4)] = c2;
- else if (resulta[c2][i] + resulta[c2][i + 1] == 0)
- final_candidate[(i / 4)] = c1;
- else
- sim_bitmap = sim_bitmap | (1 << i);
- } else {
- sim_bitmap = sim_bitmap | (1 << i);
- }
- }
- }
-
- if (sim_bitmap == 0) {
- for (i = 0; i < (bound / 4); i++) {
- if (final_candidate[i] != 0xFF) {
- for (j = i * 4; j < (i + 1) * 4 - 2; j++)
- resulta[3][j] = resulta[final_candidate[i]][j];
- result = false;
- }
- }
- return result;
- } else {
- if (!(sim_bitmap & 0x03)) { /* path A TX OK */
- for (i = 0; i < 2; i++)
- resulta[3][i] = resulta[c1][i];
- }
- if (!(sim_bitmap & 0x0c)) { /* path A RX OK */
- for (i = 2; i < 4; i++)
- resulta[3][i] = resulta[c1][i];
- }
-
- if (!(sim_bitmap & 0x30)) { /* path B TX OK */
- for (i = 4; i < 6; i++)
- resulta[3][i] = resulta[c1][i];
- }
-
- if (!(sim_bitmap & 0xc0)) { /* path B RX OK */
- for (i = 6; i < 8; i++)
- resulta[3][i] = resulta[c1][i];
- }
- return false;
- }
-}
-
-static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t)
-{
- struct hal_data_8188e *pHalData = &adapt->haldata;
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
- u32 i;
- u8 PathAOK;
- u32 ADDA_REG[IQK_ADDA_REG_NUM] = {
- rFPGA0_XCD_SwitchControl, rBlue_Tooth,
- rRx_Wait_CCA, rTx_CCK_RFON,
- rTx_CCK_BBON, rTx_OFDM_RFON,
- rTx_OFDM_BBON, rTx_To_Rx,
- rTx_To_Tx, rRx_CCK,
- rRx_OFDM, rRx_Wait_RIFS,
- rRx_TO_Rx, rStandby,
- rSleep, rPMPD_ANAEN };
- u32 IQK_MAC_REG[IQK_MAC_REG_NUM] = {
- REG_TXPAUSE, REG_BCN_CTRL,
- REG_BCN_CTRL_1, REG_GPIO_MUXCFG};
-
- /* since 92C & 92D have the different define in IQK_BB_REG */
- u32 IQK_BB_REG_92C[IQK_BB_REG_NUM] = {
- rOFDM0_TRxPathEnable, rOFDM0_TRMuxPar,
- rFPGA0_XCD_RFInterfaceSW, rConfig_AntA, rConfig_AntB,
- rFPGA0_XAB_RFInterfaceSW, rFPGA0_XA_RFInterfaceOE,
- rFPGA0_XB_RFInterfaceOE, rFPGA0_RFMOD
- };
- u32 retryCount = 2;
- /* Note: IQ calibration must be performed after loading */
- /* PHY_REG.txt , and radio_a, radio_b.txt */
-
- if (t == 0) {
- /* Save ADDA parameters, turn Path A ADDA on */
- _PHY_SaveADDARegisters(adapt, ADDA_REG, dm_odm->RFCalibrateInfo.ADDA_backup, IQK_ADDA_REG_NUM);
- _PHY_SaveMACRegisters(adapt, IQK_MAC_REG, dm_odm->RFCalibrateInfo.IQK_MAC_backup);
- _PHY_SaveADDARegisters(adapt, IQK_BB_REG_92C, dm_odm->RFCalibrateInfo.IQK_BB_backup, IQK_BB_REG_NUM);
- }
-
- _PHY_PathADDAOn(adapt, ADDA_REG);
- if (t == 0)
- dm_odm->RFCalibrateInfo.bRfPiEnable = (u8)rtl8188e_PHY_QueryBBReg(adapt, rFPGA0_XA_HSSIParameter1, BIT(8));
-
- if (!dm_odm->RFCalibrateInfo.bRfPiEnable) {
- /* Switch BB to PI mode to do IQ Calibration. */
- _PHY_PIModeSwitch(adapt, true);
- }
-
- /* BB setting */
- rtl8188e_PHY_SetBBReg(adapt, rFPGA0_RFMOD, BIT(24), 0x00);
- rtl8188e_PHY_SetBBReg(adapt, rOFDM0_TRxPathEnable, bMaskDWord, 0x03a05600);
- rtl8188e_PHY_SetBBReg(adapt, rOFDM0_TRMuxPar, bMaskDWord, 0x000800e4);
- rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, 0x22204000);
-
- rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XAB_RFInterfaceSW, BIT(10), 0x01);
- rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XAB_RFInterfaceSW, BIT(26), 0x01);
- rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XA_RFInterfaceOE, BIT(10), 0x00);
- rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XB_RFInterfaceOE, BIT(10), 0x00);
-
- /* MAC settings */
- _PHY_MACSettingCalibration(adapt, IQK_MAC_REG, dm_odm->RFCalibrateInfo.IQK_MAC_backup);
-
- /* Page B init */
- /* AP or IQK */
- rtl8188e_PHY_SetBBReg(adapt, rConfig_AntA, bMaskDWord, 0x0f600000);
-
-
- /* IQ calibration setting */
- rtl8188e_PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x80800000);
- rtl8188e_PHY_SetBBReg(adapt, rTx_IQK, bMaskDWord, 0x01007c00);
- rtl8188e_PHY_SetBBReg(adapt, rRx_IQK, bMaskDWord, 0x81004800);
-
- for (i = 0; i < retryCount; i++) {
- PathAOK = phy_PathA_IQK_8188E(adapt);
- if (PathAOK == 0x01) {
- result[t][0] = (rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_A, bMaskDWord) & 0x3FF0000) >> 16;
- result[t][1] = (rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_After_IQK_A, bMaskDWord) & 0x3FF0000) >> 16;
- break;
- }
- }
-
- for (i = 0; i < retryCount; i++) {
- PathAOK = phy_PathA_RxIQK(adapt);
- if (PathAOK == 0x03) {
- result[t][2] = (rtl8188e_PHY_QueryBBReg(adapt, rRx_Power_Before_IQK_A_2, bMaskDWord) & 0x3FF0000) >> 16;
- result[t][3] = (rtl8188e_PHY_QueryBBReg(adapt, rRx_Power_After_IQK_A_2, bMaskDWord) & 0x3FF0000) >> 16;
- break;
- }
- }
-
- /* Back to BB mode, load original value */
- rtl8188e_PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0);
-
- if (t != 0) {
- if (!dm_odm->RFCalibrateInfo.bRfPiEnable) {
- /* Switch back BB to SI mode after finish IQ Calibration. */
- _PHY_PIModeSwitch(adapt, false);
- }
-
- /* Reload ADDA power saving parameters */
- reload_adda_reg(adapt, ADDA_REG, dm_odm->RFCalibrateInfo.ADDA_backup, IQK_ADDA_REG_NUM);
-
- /* Reload MAC parameters */
- _PHY_ReloadMACRegisters(adapt, IQK_MAC_REG, dm_odm->RFCalibrateInfo.IQK_MAC_backup);
-
- reload_adda_reg(adapt, IQK_BB_REG_92C, dm_odm->RFCalibrateInfo.IQK_BB_backup, IQK_BB_REG_NUM);
-
- /* Restore RX initial gain */
- rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00032ed3);
-
- /* load 0xe30 IQC default value */
- rtl8188e_PHY_SetBBReg(adapt, rTx_IQK_Tone_A, bMaskDWord, 0x01008c00);
- rtl8188e_PHY_SetBBReg(adapt, rRx_IQK_Tone_A, bMaskDWord, 0x01008c00);
- }
-}
-
-static void phy_LCCalibrate_8188E(struct adapter *adapt)
-{
- u8 tmpreg;
- u32 RF_Amode = 0, LC_Cal;
- int res;
-
- /* Check continuous TX and Packet TX */
- res = rtw_read8(adapt, 0xd03, &tmpreg);
- if (res)
- return;
-
-	if ((tmpreg & 0x70) != 0) /* Deal with continuous TX case */
- rtw_write8(adapt, 0xd03, tmpreg & 0x8F); /* disable all continuous TX */
- else /* Deal with Packet TX case */
- rtw_write8(adapt, REG_TXPAUSE, 0xFF); /* block all queues */
-
- if ((tmpreg & 0x70) != 0) {
- /* 1. Read original RF mode */
- /* Path-A */
- RF_Amode = rtl8188e_PHY_QueryRFReg(adapt, RF_AC, bMask12Bits);
-
- /* 2. Set RF mode = standby mode */
- /* Path-A */
- rtl8188e_PHY_SetRFReg(adapt, RF_AC, bMask12Bits, (RF_Amode & 0x8FFFF) | 0x10000);
- }
-
- /* 3. Read RF reg18 */
- LC_Cal = rtl8188e_PHY_QueryRFReg(adapt, RF_CHNLBW, bMask12Bits);
-
- /* 4. Set LC calibration begin bit15 */
- rtl8188e_PHY_SetRFReg(adapt, RF_CHNLBW, bMask12Bits, LC_Cal | 0x08000);
-
- msleep(100);
-
- /* Restore original situation */
- if ((tmpreg & 0x70) != 0) {
- /* Deal with continuous TX case */
- /* Path-A */
- rtw_write8(adapt, 0xd03, tmpreg);
- rtl8188e_PHY_SetRFReg(adapt, RF_AC, bMask12Bits, RF_Amode);
- } else {
- /* Deal with Packet TX case */
- rtw_write8(adapt, REG_TXPAUSE, 0x00);
- }
-}
-
-void PHY_IQCalibrate_8188E(struct adapter *adapt, bool recovery)
-{
- struct hal_data_8188e *pHalData = &adapt->haldata;
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
- s32 result[4][8]; /* last is final result */
- u8 i, final_candidate;
- bool pathaok;
- s32 RegE94, RegE9C, RegEA4, RegEB4, RegEBC;
- bool is12simular, is13simular, is23simular;
- u32 IQK_BB_REG_92C[IQK_BB_REG_NUM] = {
- rOFDM0_XARxIQImbalance, rOFDM0_XBRxIQImbalance,
- rOFDM0_ECCAThreshold, rOFDM0_AGCRSSITable,
- rOFDM0_XATxIQImbalance, rOFDM0_XBTxIQImbalance,
- rOFDM0_XCTxAFE, rOFDM0_XDTxAFE,
- rOFDM0_RxIQExtAnta};
-
- if (recovery) {
- reload_adda_reg(adapt, IQK_BB_REG_92C, dm_odm->RFCalibrateInfo.IQK_BB_backup_recover, 9);
- return;
- }
-
- for (i = 0; i < 8; i++) {
- result[0][i] = 0;
- result[1][i] = 0;
- result[2][i] = 0;
- if ((i == 0) || (i == 2) || (i == 4) || (i == 6))
- result[3][i] = 0x100;
- else
- result[3][i] = 0;
- }
- final_candidate = 0xff;
- pathaok = false;
- is12simular = false;
- is23simular = false;
- is13simular = false;
-
- for (i = 0; i < 3; i++) {
- phy_IQCalibrate_8188E(adapt, result, i);
-
- if (i == 1) {
- is12simular = phy_SimularityCompare_8188E(adapt, result, 0, 1);
- if (is12simular) {
- final_candidate = 0;
- break;
- }
- }
-
- if (i == 2) {
- is13simular = phy_SimularityCompare_8188E(adapt, result, 0, 2);
- if (is13simular) {
- final_candidate = 0;
-
- break;
- }
- is23simular = phy_SimularityCompare_8188E(adapt, result, 1, 2);
- if (is23simular) {
- final_candidate = 1;
- } else {
- final_candidate = 3;
- }
- }
- }
-
- for (i = 0; i < 4; i++) {
- RegE94 = result[i][0];
- RegE9C = result[i][1];
- RegEA4 = result[i][2];
- RegEB4 = result[i][4];
- RegEBC = result[i][5];
- }
-
- if (final_candidate != 0xff) {
- RegE94 = result[final_candidate][0];
- RegE9C = result[final_candidate][1];
- RegEA4 = result[final_candidate][2];
- RegEB4 = result[final_candidate][4];
- RegEBC = result[final_candidate][5];
- dm_odm->RFCalibrateInfo.RegE94 = RegE94;
- dm_odm->RFCalibrateInfo.RegE9C = RegE9C;
- dm_odm->RFCalibrateInfo.RegEB4 = RegEB4;
- dm_odm->RFCalibrateInfo.RegEBC = RegEBC;
- pathaok = true;
- } else {
- dm_odm->RFCalibrateInfo.RegE94 = 0x100;
- dm_odm->RFCalibrateInfo.RegEB4 = 0x100; /* X default value */
- dm_odm->RFCalibrateInfo.RegE9C = 0x0;
- dm_odm->RFCalibrateInfo.RegEBC = 0x0; /* Y default value */
- }
- if (RegE94 != 0)
- patha_fill_iqk(adapt, pathaok, result, final_candidate, (RegEA4 == 0));
-
- _PHY_SaveADDARegisters(adapt, IQK_BB_REG_92C, dm_odm->RFCalibrateInfo.IQK_BB_backup_recover, 9);
-}
-
-void PHY_LCCalibrate_8188E(struct adapter *adapt)
-{
- u32 timeout = 2000, timecount = 0;
- struct hal_data_8188e *pHalData = &adapt->haldata;
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
-
- while (*dm_odm->pbScanInProcess && timecount < timeout) {
- mdelay(50);
- timecount += 50;
- }
-
- phy_LCCalibrate_8188E(adapt);
-}
diff --git a/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c b/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c
deleted file mode 100644
index 6c0b1368383d..000000000000
--- a/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c
+++ /dev/null
@@ -1,149 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#include "../include/HalPwrSeqCmd.h"
-
-#define PWR_CMD_WRITE 0x01
- /* offset: the read register offset */
- /* msk: the mask of the write bits */
- /* value: write value */
- /* note: driver shall implement this cmd by read & msk after write */
-
-#define PWR_CMD_POLLING 0x02
- /* offset: the read register offset */
- /* msk: the mask of the polled value */
-	/* value: the value to be polled, masked by the msk field. */
- /* note: driver shall implement this cmd by */
- /* do{ */
- /* if ( (Read(offset) & msk) == (value & msk) ) */
- /* break; */
- /* } while (not timeout); */
-
-#define PWR_CMD_DELAY 0x03
- /* offset: the value to delay (in us) */
- /* msk: N/A */
- /* value: N/A */
-
-struct wl_pwr_cfg {
- u16 offset;
- u8 cmd:4;
- u8 msk;
- u8 value;
-};
-
-#define GET_PWR_CFG_OFFSET(__PWR_CMD) __PWR_CMD.offset
-#define GET_PWR_CFG_CMD(__PWR_CMD) __PWR_CMD.cmd
-#define GET_PWR_CFG_MASK(__PWR_CMD) __PWR_CMD.msk
-#define GET_PWR_CFG_VALUE(__PWR_CMD) __PWR_CMD.value
-
-static struct wl_pwr_cfg rtl8188E_power_on_flow[] = {
- { 0x0006, PWR_CMD_POLLING, BIT(1), BIT(1) },
- { 0x0002, PWR_CMD_WRITE, BIT(0) | BIT(1), 0 }, /* reset BB */
- { 0x0026, PWR_CMD_WRITE, BIT(7), BIT(7) }, /* schmitt trigger */
- { 0x0005, PWR_CMD_WRITE, BIT(7), 0 }, /* disable HWPDN (control by DRV)*/
- { 0x0005, PWR_CMD_WRITE, BIT(4) | BIT(3), 0 }, /* disable WL suspend*/
- { 0x0005, PWR_CMD_WRITE, BIT(0), BIT(0) },
- { 0x0005, PWR_CMD_POLLING, BIT(0), 0 },
- { 0x0023, PWR_CMD_WRITE, BIT(4), 0 },
-};
-
-static struct wl_pwr_cfg rtl8188E_card_disable_flow[] = {
- { 0x001F, PWR_CMD_WRITE, 0xFF, 0 }, /* turn off RF */
- { 0x0023, PWR_CMD_WRITE, BIT(4), BIT(4) }, /* LDO Sleep mode */
- { 0x0005, PWR_CMD_WRITE, BIT(1), BIT(1) }, /* turn off MAC by HW state machine */
- { 0x0005, PWR_CMD_POLLING, BIT(1), 0 },
- { 0x0026, PWR_CMD_WRITE, BIT(7), BIT(7) }, /* schmitt trigger */
- { 0x0005, PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3) }, /* enable WL suspend */
- { 0x0007, PWR_CMD_WRITE, 0xFF, 0 }, /* enable bandgap mbias in suspend */
- { 0x0041, PWR_CMD_WRITE, BIT(4), 0 }, /* Clear SIC_EN register */
- { 0xfe10, PWR_CMD_WRITE, BIT(4), BIT(4) }, /* Set USB suspend enable local register */
-};
-
-/* This is used by driver for LPSRadioOff Procedure, not for FW LPS Step */
-static struct wl_pwr_cfg rtl8188E_enter_lps_flow[] = {
- { 0x0522, PWR_CMD_WRITE, 0xFF, 0x7F },/* Tx Pause */
- { 0x05F8, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */
- { 0x05F9, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */
- { 0x05FA, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */
- { 0x05FB, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */
- { 0x0002, PWR_CMD_WRITE, BIT(0), 0 }, /* CCK and OFDM are disabled, clocks are gated */
- { 0x0002, PWR_CMD_DELAY, 0, 0 },
- { 0x0100, PWR_CMD_WRITE, 0xFF, 0x3F }, /* Reset MAC TRX */
- { 0x0101, PWR_CMD_WRITE, BIT(1), 0 }, /* check if removed later */
- { 0x0553, PWR_CMD_WRITE, BIT(5), BIT(5) }, /* Respond TxOK to scheduler */
-};
-
-u8 HalPwrSeqCmdParsing(struct adapter *padapter, enum r8188eu_pwr_seq seq)
-{
- struct wl_pwr_cfg pwrcfgcmd = {0};
- struct wl_pwr_cfg *pwrseqcmd;
- u8 poll_bit = false;
- u8 idx, num_steps;
- u8 value = 0;
- u32 offset = 0;
- u32 poll_count = 0; /* polling autoload done. */
- u32 max_poll_count = 5000;
- int res;
-
- switch (seq) {
- case PWR_ON_FLOW:
- pwrseqcmd = rtl8188E_power_on_flow;
- num_steps = ARRAY_SIZE(rtl8188E_power_on_flow);
- break;
- case DISABLE_FLOW:
- pwrseqcmd = rtl8188E_card_disable_flow;
- num_steps = ARRAY_SIZE(rtl8188E_card_disable_flow);
- break;
- case LPS_ENTER_FLOW:
- pwrseqcmd = rtl8188E_enter_lps_flow;
- num_steps = ARRAY_SIZE(rtl8188E_enter_lps_flow);
- break;
- default:
- return false;
- }
-
- for (idx = 0; idx < num_steps; idx++) {
- pwrcfgcmd = pwrseqcmd[idx];
-
- switch (GET_PWR_CFG_CMD(pwrcfgcmd)) {
- case PWR_CMD_WRITE:
- offset = GET_PWR_CFG_OFFSET(pwrcfgcmd);
-
- /* Read the value from system register */
- res = rtw_read8(padapter, offset, &value);
- if (res)
- return false;
-
- value &= ~(GET_PWR_CFG_MASK(pwrcfgcmd));
- value |= (GET_PWR_CFG_VALUE(pwrcfgcmd) & GET_PWR_CFG_MASK(pwrcfgcmd));
-
- /* Write the value back to system register */
- rtw_write8(padapter, offset, value);
- break;
- case PWR_CMD_POLLING:
- poll_bit = false;
- offset = GET_PWR_CFG_OFFSET(pwrcfgcmd);
- do {
- res = rtw_read8(padapter, offset, &value);
- if (res)
- return false;
-
- value &= GET_PWR_CFG_MASK(pwrcfgcmd);
- if (value == (GET_PWR_CFG_VALUE(pwrcfgcmd) & GET_PWR_CFG_MASK(pwrcfgcmd)))
- poll_bit = true;
- else
- udelay(10);
-
- if (poll_count++ > max_poll_count)
- return false;
- } while (!poll_bit);
- break;
- case PWR_CMD_DELAY:
- udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd));
- break;
- default:
- break;
- }
- }
- return true;
-}
diff --git a/drivers/staging/r8188eu/hal/hal_com.c b/drivers/staging/r8188eu/hal/hal_com.c
deleted file mode 100644
index 33967eb3c0d0..000000000000
--- a/drivers/staging/r8188eu/hal/hal_com.c
+++ /dev/null
@@ -1,139 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-
-#include "../include/hal_intf.h"
-#include "../include/hal_com.h"
-#include "../include/rtl8188e_hal.h"
-
-#define _HAL_INIT_C_
-
-#define CHAN_PLAN_HW 0x80
-
-u8 /* return the final channel plan decision */
-hal_com_get_channel_plan(struct adapter *padapter, u8 hw_channel_plan,
- u8 sw_channel_plan, u8 def_channel_plan,
- bool load_fail)
-{
- u8 sw_cfg;
- u8 chnlplan;
-
- sw_cfg = true;
- if (!load_fail) {
- if (!rtw_is_channel_plan_valid(sw_channel_plan))
- sw_cfg = false;
- if (hw_channel_plan & CHAN_PLAN_HW)
- sw_cfg = false;
- }
-
- if (sw_cfg)
- chnlplan = sw_channel_plan;
- else
- chnlplan = hw_channel_plan & (~CHAN_PLAN_HW);
-
- if (!rtw_is_channel_plan_valid(chnlplan))
- chnlplan = def_channel_plan;
-
- return chnlplan;
-}
-
-u8 MRateToHwRate(u8 rate)
-{
- u8 ret = DESC_RATE1M;
-
- switch (rate) {
- /* CCK and OFDM non-HT rates */
- case IEEE80211_CCK_RATE_1MB:
- ret = DESC_RATE1M;
- break;
- case IEEE80211_CCK_RATE_2MB:
- ret = DESC_RATE2M;
- break;
- case IEEE80211_CCK_RATE_5MB:
- ret = DESC_RATE5_5M;
- break;
- case IEEE80211_CCK_RATE_11MB:
- ret = DESC_RATE11M;
- break;
- case IEEE80211_OFDM_RATE_6MB:
- ret = DESC_RATE6M;
- break;
- case IEEE80211_OFDM_RATE_9MB:
- ret = DESC_RATE9M;
- break;
- case IEEE80211_OFDM_RATE_12MB:
- ret = DESC_RATE12M;
- break;
- case IEEE80211_OFDM_RATE_18MB:
- ret = DESC_RATE18M;
- break;
- case IEEE80211_OFDM_RATE_24MB:
- ret = DESC_RATE24M;
- break;
- case IEEE80211_OFDM_RATE_36MB:
- ret = DESC_RATE36M;
- break;
- case IEEE80211_OFDM_RATE_48MB:
- ret = DESC_RATE48M;
- break;
- case IEEE80211_OFDM_RATE_54MB:
- ret = DESC_RATE54M;
- break;
- default:
- break;
- }
- return ret;
-}
-
-void HalSetBrateCfg(struct adapter *adapt, u8 *brates, u16 *rate_cfg)
-{
- u8 i, is_brate, brate;
-
- for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
- is_brate = brates[i] & IEEE80211_BASIC_RATE_MASK;
- brate = brates[i] & 0x7f;
-
- if (is_brate) {
- switch (brate) {
- case IEEE80211_CCK_RATE_1MB:
- *rate_cfg |= RATE_1M;
- break;
- case IEEE80211_CCK_RATE_2MB:
- *rate_cfg |= RATE_2M;
- break;
- case IEEE80211_CCK_RATE_5MB:
- *rate_cfg |= RATE_5_5M;
- break;
- case IEEE80211_CCK_RATE_11MB:
- *rate_cfg |= RATE_11M;
- break;
- case IEEE80211_OFDM_RATE_6MB:
- *rate_cfg |= RATE_6M;
- break;
- case IEEE80211_OFDM_RATE_9MB:
- *rate_cfg |= RATE_9M;
- break;
- case IEEE80211_OFDM_RATE_12MB:
- *rate_cfg |= RATE_12M;
- break;
- case IEEE80211_OFDM_RATE_18MB:
- *rate_cfg |= RATE_18M;
- break;
- case IEEE80211_OFDM_RATE_24MB:
- *rate_cfg |= RATE_24M;
- break;
- case IEEE80211_OFDM_RATE_36MB:
- *rate_cfg |= RATE_36M;
- break;
- case IEEE80211_OFDM_RATE_48MB:
- *rate_cfg |= RATE_48M;
- break;
- case IEEE80211_OFDM_RATE_54MB:
- *rate_cfg |= RATE_54M;
- break;
- }
- }
- }
-}
diff --git a/drivers/staging/r8188eu/hal/hal_intf.c b/drivers/staging/r8188eu/hal/hal_intf.c
deleted file mode 100644
index 13790e32f11c..000000000000
--- a/drivers/staging/r8188eu/hal/hal_intf.c
+++ /dev/null
@@ -1,50 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#define _HAL_INTF_C_
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/hal_intf.h"
-
-uint rtw_hal_init(struct adapter *adapt)
-{
- adapt->hw_init_completed = false;
-
- if (rtl8188eu_hal_init(adapt) != _SUCCESS)
- return _FAIL;
-
- adapt->hw_init_completed = true;
-
- if (adapt->registrypriv.notch_filter == 1)
- hal_notch_filter_8188e(adapt, 1);
-
- return _SUCCESS;
-}
-
-uint rtw_hal_deinit(struct adapter *adapt)
-{
- uint status = _SUCCESS;
-
- status = rtl8188eu_hal_deinit(adapt);
-
- if (status == _SUCCESS)
- adapt->hw_init_completed = false;
-
- return status;
-}
-
-void rtw_hal_update_ra_mask(struct adapter *adapt, u32 mac_id, u8 rssi_level)
-{
- struct mlme_priv *pmlmepriv = &adapt->mlmepriv;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &adapt->stapriv;
- if (mac_id >= 2)
- psta = pstapriv->sta_aid[(mac_id - 1) - 1];
- if (psta)
- add_RATid(adapt, psta, 0);/* todo: based on rssi_level*/
- } else {
- UpdateHalRAMask8188EUsb(adapt, mac_id, rssi_level);
- }
-}
diff --git a/drivers/staging/r8188eu/hal/odm.c b/drivers/staging/r8188eu/hal/odm.c
deleted file mode 100644
index 94f9b125d860..000000000000
--- a/drivers/staging/r8188eu/hal/odm.c
+++ /dev/null
@@ -1,821 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#include "../include/drv_types.h"
-
-/* avoid warning in FreeBSD ==> TODO: modify */
-static u32 EDCAParam[HT_IOT_PEER_MAX][3] = {
- /* UL DL */
- {0x5ea42b, 0x5ea42b, 0x5ea42b}, /* 0:unknown AP */
- {0xa44f, 0x5ea44f, 0x5e431c}, /* 1:realtek AP */
- {0x5ea42b, 0x5ea42b, 0x5ea42b}, /* 2:unknown AP => realtek_92SE */
- {0x5ea32b, 0x5ea42b, 0x5e4322}, /* 3:broadcom AP */
- {0x5ea422, 0x00a44f, 0x00a44f}, /* 4:ralink AP */
- {0x5ea322, 0x00a630, 0x00a44f}, /* 5:atheros AP */
- {0x5e4322, 0x5e4322, 0x5e4322},/* 6:cisco AP */
- {0x5ea44f, 0x00a44f, 0x5ea42b}, /* 8:marvell AP */
- {0x5ea42b, 0x5ea42b, 0x5ea42b}, /* 10:unknown AP=> 92U AP */
- {0x5ea42b, 0xa630, 0x5e431c}, /* 11:airgocap AP */
-};
-
-/* Global var */
-u32 OFDMSwingTable[OFDM_TABLE_SIZE_92D] = {
- 0x7f8001fe, /* 0, +6.0dB */
- 0x788001e2, /* 1, +5.5dB */
- 0x71c001c7, /* 2, +5.0dB */
- 0x6b8001ae, /* 3, +4.5dB */
- 0x65400195, /* 4, +4.0dB */
- 0x5fc0017f, /* 5, +3.5dB */
- 0x5a400169, /* 6, +3.0dB */
- 0x55400155, /* 7, +2.5dB */
- 0x50800142, /* 8, +2.0dB */
- 0x4c000130, /* 9, +1.5dB */
- 0x47c0011f, /* 10, +1.0dB */
- 0x43c0010f, /* 11, +0.5dB */
- 0x40000100, /* 12, +0dB */
- 0x3c8000f2, /* 13, -0.5dB */
- 0x390000e4, /* 14, -1.0dB */
- 0x35c000d7, /* 15, -1.5dB */
- 0x32c000cb, /* 16, -2.0dB */
- 0x300000c0, /* 17, -2.5dB */
- 0x2d4000b5, /* 18, -3.0dB */
- 0x2ac000ab, /* 19, -3.5dB */
- 0x288000a2, /* 20, -4.0dB */
- 0x26000098, /* 21, -4.5dB */
- 0x24000090, /* 22, -5.0dB */
- 0x22000088, /* 23, -5.5dB */
- 0x20000080, /* 24, -6.0dB */
- 0x1e400079, /* 25, -6.5dB */
- 0x1c800072, /* 26, -7.0dB */
- 0x1b00006c, /* 27. -7.5dB */
- 0x19800066, /* 28, -8.0dB */
- 0x18000060, /* 29, -8.5dB */
- 0x16c0005b, /* 30, -9.0dB */
- 0x15800056, /* 31, -9.5dB */
- 0x14400051, /* 32, -10.0dB */
- 0x1300004c, /* 33, -10.5dB */
- 0x12000048, /* 34, -11.0dB */
- 0x11000044, /* 35, -11.5dB */
- 0x10000040, /* 36, -12.0dB */
- 0x0f00003c,/* 37, -12.5dB */
- 0x0e400039,/* 38, -13.0dB */
- 0x0d800036,/* 39, -13.5dB */
- 0x0cc00033,/* 40, -14.0dB */
- 0x0c000030,/* 41, -14.5dB */
- 0x0b40002d,/* 42, -15.0dB */
-};
-
-u8 cck_swing_table[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */
- {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */
- {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */
- {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */
- {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */
- {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */
- {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */
- {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */
- {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */
- {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */
- {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */
- {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */
- {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */
- {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */
- {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */
- {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */
- {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
- {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */
- {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */
- {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */
- {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB */
- {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB */
- {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB */
- {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB */
- {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB */
- {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB */
- {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB */
- {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB */
- {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB */
- {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB */
- {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB */
- {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB */
- {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */
-};
-
-#define RxDefaultAnt1 0x65a9
-#define RxDefaultAnt2 0x569a
-
-static void odm_DIGInit(struct odm_dm_struct *pDM_Odm)
-{
- struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
- struct adapter *adapter = pDM_Odm->Adapter;
-
- pDM_DigTable->CurIGValue = (u8)rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_IGI_A_11N, ODM_BIT_IGI_11N);
- pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
- pDM_DigTable->rx_gain_range_min = DM_DIG_MIN_NIC;
- pDM_DigTable->CurCCK_CCAThres = 0x83;
- pDM_DigTable->ForbiddenIGI = DM_DIG_MIN_NIC;
- pDM_DigTable->LargeFAHit = 0;
- pDM_DigTable->Recover_cnt = 0;
- pDM_DigTable->DIG_Dynamic_MIN_0 = DM_DIG_MIN_NIC;
- pDM_DigTable->bMediaConnect_0 = false;
-
- /* To Initialize pDM_Odm->bDMInitialGainEnable == false to avoid DIG error */
- pDM_Odm->bDMInitialGainEnable = true;
-}
-
-static void odm_DIG(struct odm_dm_struct *pDM_Odm)
-{
- struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
- struct false_alarm_stats *pFalseAlmCnt = &pDM_Odm->FalseAlmCnt;
- u8 DIG_Dynamic_MIN;
- u8 DIG_MaxOfMin;
- bool FirstConnect, FirstDisConnect;
- u8 dm_dig_max, dm_dig_min;
- u8 CurrentIGI = pDM_DigTable->CurIGValue;
-
- if (*pDM_Odm->pbScanInProcess)
- return;
-
- /* add by Neil Chen to avoid PSD is processing */
- if (!pDM_Odm->bDMInitialGainEnable)
- return;
-
- DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_0;
- FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_0);
- FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_0);
-
- /* 1 Boundary Decision */
- dm_dig_max = DM_DIG_MAX_NIC;
- dm_dig_min = DM_DIG_MIN_NIC;
- DIG_MaxOfMin = DM_DIG_MAX_AP;
-
- if (pDM_Odm->bLinked) {
- /* 2 8723A Series, offset need to be 10 */
- /* 2 Modify DIG upper bound */
- if ((pDM_Odm->RSSI_Min + 20) > dm_dig_max)
- pDM_DigTable->rx_gain_range_max = dm_dig_max;
- else if ((pDM_Odm->RSSI_Min + 20) < dm_dig_min)
- pDM_DigTable->rx_gain_range_max = dm_dig_min;
- else
- pDM_DigTable->rx_gain_range_max = pDM_Odm->RSSI_Min + 20;
- /* 2 Modify DIG lower bound */
- if (pDM_Odm->bOneEntryOnly) {
- if (pDM_Odm->RSSI_Min < dm_dig_min)
- DIG_Dynamic_MIN = dm_dig_min;
- else if (pDM_Odm->RSSI_Min > DIG_MaxOfMin)
- DIG_Dynamic_MIN = DIG_MaxOfMin;
- else
- DIG_Dynamic_MIN = pDM_Odm->RSSI_Min;
- } else if (pDM_Odm->SupportAbility & ODM_BB_ANT_DIV) {
- /* 1 Lower Bound for 88E AntDiv */
- if (pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV)
- DIG_Dynamic_MIN = (u8)pDM_DigTable->AntDiv_RSSI_max;
- } else {
- DIG_Dynamic_MIN = dm_dig_min;
- }
- } else {
- pDM_DigTable->rx_gain_range_max = dm_dig_max;
- DIG_Dynamic_MIN = dm_dig_min;
- }
-
- /* 1 Modify DIG lower bound, deal with abnormally large false alarm */
- if (pFalseAlmCnt->Cnt_all > 10000) {
- if (pDM_DigTable->LargeFAHit != 3)
- pDM_DigTable->LargeFAHit++;
- if (pDM_DigTable->ForbiddenIGI < CurrentIGI) {
- pDM_DigTable->ForbiddenIGI = CurrentIGI;
- pDM_DigTable->LargeFAHit = 1;
- }
-
- if (pDM_DigTable->LargeFAHit >= 3) {
- if ((pDM_DigTable->ForbiddenIGI + 1) > pDM_DigTable->rx_gain_range_max)
- pDM_DigTable->rx_gain_range_min = pDM_DigTable->rx_gain_range_max;
- else
- pDM_DigTable->rx_gain_range_min = (pDM_DigTable->ForbiddenIGI + 1);
- pDM_DigTable->Recover_cnt = 3600; /* 3600=2hr */
- }
-
- } else {
- /* Recovery mechanism for IGI lower bound */
- if (pDM_DigTable->Recover_cnt != 0) {
- pDM_DigTable->Recover_cnt--;
- } else {
- if (pDM_DigTable->LargeFAHit < 3) {
- if ((pDM_DigTable->ForbiddenIGI - 1) < DIG_Dynamic_MIN) { /* DM_DIG_MIN) */
- pDM_DigTable->ForbiddenIGI = DIG_Dynamic_MIN; /* DM_DIG_MIN; */
- pDM_DigTable->rx_gain_range_min = DIG_Dynamic_MIN; /* DM_DIG_MIN; */
- } else {
- pDM_DigTable->ForbiddenIGI--;
- pDM_DigTable->rx_gain_range_min = (pDM_DigTable->ForbiddenIGI + 1);
- }
- } else {
- pDM_DigTable->LargeFAHit = 0;
- }
- }
- }
-
- /* 1 Adjust initial gain by false alarm */
- if (pDM_Odm->bLinked) {
- if (FirstConnect) {
- CurrentIGI = pDM_Odm->RSSI_Min;
- } else {
- if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2)
- CurrentIGI = CurrentIGI + 4;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
- else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1)
- CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
- else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0)
- CurrentIGI = CurrentIGI - 2;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
- }
- } else {
- if (FirstDisConnect) {
- CurrentIGI = pDM_DigTable->rx_gain_range_min;
- } else {
- /* 2012.03.30 LukeLee: enable DIG before link but with very high thresholds */
- if (pFalseAlmCnt->Cnt_all > 10000)
- CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
- else if (pFalseAlmCnt->Cnt_all > 8000)
- CurrentIGI = CurrentIGI + 1;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
- else if (pFalseAlmCnt->Cnt_all < 500)
- CurrentIGI = CurrentIGI - 1;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
- }
- }
- /* 1 Check initial gain by upper/lower bound */
- if (CurrentIGI > pDM_DigTable->rx_gain_range_max)
- CurrentIGI = pDM_DigTable->rx_gain_range_max;
- if (CurrentIGI < pDM_DigTable->rx_gain_range_min)
- CurrentIGI = pDM_DigTable->rx_gain_range_min;
-
- /* 2 High power RSSI threshold */
-
- ODM_Write_DIG(pDM_Odm, CurrentIGI);/* ODM_Write_DIG(pDM_Odm, pDM_DigTable->CurIGValue); */
- pDM_DigTable->bMediaConnect_0 = pDM_Odm->bLinked;
- pDM_DigTable->DIG_Dynamic_MIN_0 = DIG_Dynamic_MIN;
-}
-
-static void odm_CommonInfoSelfInit(struct odm_dm_struct *pDM_Odm)
-{
- struct adapter *adapter = pDM_Odm->Adapter;
-
- pDM_Odm->bCckHighPower = (bool)rtl8188e_PHY_QueryBBReg(adapter, 0x824, BIT(9));
- pDM_Odm->RFPathRxEnable = (u8)rtl8188e_PHY_QueryBBReg(adapter, 0xc04, 0x0F);
-}
-
-static void odm_CommonInfoSelfUpdate(struct odm_dm_struct *pDM_Odm)
-{
- u8 EntryCnt = 0;
- u8 i;
- struct sta_info *pEntry;
-
- if (*pDM_Odm->pBandWidth == HT_CHANNEL_WIDTH_40) {
- if (*pDM_Odm->pSecChOffset == 1)
- pDM_Odm->ControlChannel = *pDM_Odm->pChannel - 2;
- else if (*pDM_Odm->pSecChOffset == 2)
- pDM_Odm->ControlChannel = *pDM_Odm->pChannel + 2;
- } else {
- pDM_Odm->ControlChannel = *pDM_Odm->pChannel;
- }
-
- for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
- pEntry = pDM_Odm->pODM_StaInfo[i];
- if (IS_STA_VALID(pEntry))
- EntryCnt++;
- }
- if (EntryCnt == 1)
- pDM_Odm->bOneEntryOnly = true;
- else
- pDM_Odm->bOneEntryOnly = false;
-}
-
-static void odm_RateAdaptiveMaskInit(struct odm_dm_struct *pDM_Odm)
-{
- struct odm_rate_adapt *pOdmRA = &pDM_Odm->RateAdaptive;
-
- pOdmRA->RATRState = DM_RATR_STA_INIT;
- pOdmRA->HighRSSIThresh = 50;
- pOdmRA->LowRSSIThresh = 20;
-}
-
-static void odm_RefreshRateAdaptiveMask(struct odm_dm_struct *pDM_Odm)
-{
- u8 i;
- struct adapter *pAdapter = pDM_Odm->Adapter;
-
- if (pAdapter->bDriverStopped)
- return;
-
- for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
- struct sta_info *pstat = pDM_Odm->pODM_StaInfo[i];
-
- if (IS_STA_VALID(pstat)) {
- if (ODM_RAStateCheck(pDM_Odm, pstat->rssi_stat.UndecoratedSmoothedPWDB, false, &pstat->rssi_level))
- rtw_hal_update_ra_mask(pAdapter, i, pstat->rssi_level);
- }
- }
-}
-
-static void odm_DynamicBBPowerSavingInit(struct odm_dm_struct *pDM_Odm)
-{
- struct rtl_ps *pDM_PSTable = &pDM_Odm->DM_PSTable;
-
- pDM_PSTable->pre_rf_state = RF_MAX;
- pDM_PSTable->cur_rf_state = RF_MAX;
- pDM_PSTable->initialize = 0;
-}
-
-static void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm)
-{
- u32 ret_value;
- struct false_alarm_stats *FalseAlmCnt = &pDM_Odm->FalseAlmCnt;
- struct adapter *adapter = pDM_Odm->Adapter;
-
- /* hold ofdm counter */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_OFDM_FA_HOLDC_11N, BIT(31), 1); /* hold page C counter */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_OFDM_FA_RSTD_11N, BIT(31), 1); /* hold page D counter */
-
- ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_OFDM_FA_TYPE1_11N, bMaskDWord);
- FalseAlmCnt->Cnt_Fast_Fsync = (ret_value & 0xffff);
- FalseAlmCnt->Cnt_SB_Search_fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_OFDM_FA_TYPE2_11N, bMaskDWord);
- FalseAlmCnt->Cnt_OFDM_CCA = (ret_value & 0xffff);
- FalseAlmCnt->Cnt_Parity_Fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_OFDM_FA_TYPE3_11N, bMaskDWord);
- FalseAlmCnt->Cnt_Rate_Illegal = (ret_value & 0xffff);
- FalseAlmCnt->Cnt_Crc8_fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_OFDM_FA_TYPE4_11N, bMaskDWord);
- FalseAlmCnt->Cnt_Mcs_fail = (ret_value & 0xffff);
-
- FalseAlmCnt->Cnt_Ofdm_fail = FalseAlmCnt->Cnt_Parity_Fail + FalseAlmCnt->Cnt_Rate_Illegal +
- FalseAlmCnt->Cnt_Crc8_fail + FalseAlmCnt->Cnt_Mcs_fail +
- FalseAlmCnt->Cnt_Fast_Fsync + FalseAlmCnt->Cnt_SB_Search_fail;
-
- ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_SC_CNT_11N, bMaskDWord);
- FalseAlmCnt->Cnt_BW_LSC = (ret_value & 0xffff);
- FalseAlmCnt->Cnt_BW_USC = ((ret_value & 0xffff0000) >> 16);
-
- /* hold cck counter */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_CCK_FA_RST_11N, BIT(12), 1);
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_CCK_FA_RST_11N, BIT(14), 1);
-
- ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_CCK_FA_LSB_11N, bMaskByte0);
- FalseAlmCnt->Cnt_Cck_fail = ret_value;
- ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_CCK_FA_MSB_11N, bMaskByte3);
- FalseAlmCnt->Cnt_Cck_fail += (ret_value & 0xff) << 8;
-
- ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_CCK_CCA_CNT_11N, bMaskDWord);
- FalseAlmCnt->Cnt_CCK_CCA = ((ret_value & 0xFF) << 8) | ((ret_value & 0xFF00) >> 8);
-
- FalseAlmCnt->Cnt_all = (FalseAlmCnt->Cnt_Fast_Fsync +
- FalseAlmCnt->Cnt_SB_Search_fail +
- FalseAlmCnt->Cnt_Parity_Fail +
- FalseAlmCnt->Cnt_Rate_Illegal +
- FalseAlmCnt->Cnt_Crc8_fail +
- FalseAlmCnt->Cnt_Mcs_fail +
- FalseAlmCnt->Cnt_Cck_fail);
-
- FalseAlmCnt->Cnt_CCA_all = FalseAlmCnt->Cnt_OFDM_CCA + FalseAlmCnt->Cnt_CCK_CCA;
-}
-
-static void odm_CCKPacketDetectionThresh(struct odm_dm_struct *pDM_Odm)
-{
- u8 CurCCK_CCAThres;
- struct false_alarm_stats *FalseAlmCnt = &pDM_Odm->FalseAlmCnt;
-
- if (pDM_Odm->bLinked) {
- if (pDM_Odm->RSSI_Min > 25) {
- CurCCK_CCAThres = 0xcd;
- } else if ((pDM_Odm->RSSI_Min <= 25) && (pDM_Odm->RSSI_Min > 10)) {
- CurCCK_CCAThres = 0x83;
- } else {
- if (FalseAlmCnt->Cnt_Cck_fail > 1000)
- CurCCK_CCAThres = 0x83;
- else
- CurCCK_CCAThres = 0x40;
- }
- } else {
- if (FalseAlmCnt->Cnt_Cck_fail > 1000)
- CurCCK_CCAThres = 0x83;
- else
- CurCCK_CCAThres = 0x40;
- }
- ODM_Write_CCK_CCA_Thres(pDM_Odm, CurCCK_CCAThres);
-}
-
-static void FindMinimumRSSI(struct adapter *pAdapter)
-{
- struct hal_data_8188e *pHalData = &pAdapter->haldata;
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
- struct mlme_priv *pmlmepriv = &pAdapter->mlmepriv;
-
- /* 1 1.Determine the minimum RSSI */
- if (!check_fwstate(pmlmepriv, _FW_LINKED) &&
- pdmpriv->EntryMinUndecoratedSmoothedPWDB == 0)
- pdmpriv->MinUndecoratedPWDBForDM = 0;
-
- pdmpriv->MinUndecoratedPWDBForDM = pdmpriv->EntryMinUndecoratedSmoothedPWDB;
-}
-
-static void odm_RSSIMonitorCheck(struct odm_dm_struct *pDM_Odm)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- struct hal_data_8188e *pHalData = &Adapter->haldata;
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
- int i;
- int tmpEntryMaxPWDB = 0, tmpEntryMinPWDB = 0xff;
- u8 sta_cnt = 0;
- u32 PWDB_rssi[NUM_STA] = {0}; /* [0~15]:MACID, [16~31]:PWDB_rssi */
- struct sta_info *psta;
-
- if (!(pDM_Odm->SupportAbility & ODM_BB_RSSI_MONITOR))
- return;
-
- if (!check_fwstate(&Adapter->mlmepriv, _FW_LINKED))
- return;
-
- for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
- psta = pDM_Odm->pODM_StaInfo[i];
- if (IS_STA_VALID(psta) &&
- (psta->state & WIFI_ASOC_STATE) &&
- !is_broadcast_ether_addr(psta->hwaddr) &&
- memcmp(psta->hwaddr, myid(&Adapter->eeprompriv), ETH_ALEN)) {
- if (psta->rssi_stat.UndecoratedSmoothedPWDB < tmpEntryMinPWDB)
- tmpEntryMinPWDB = psta->rssi_stat.UndecoratedSmoothedPWDB;
-
- if (psta->rssi_stat.UndecoratedSmoothedPWDB > tmpEntryMaxPWDB)
- tmpEntryMaxPWDB = psta->rssi_stat.UndecoratedSmoothedPWDB;
- if (psta->rssi_stat.UndecoratedSmoothedPWDB != (-1))
- PWDB_rssi[sta_cnt++] = (psta->mac_id | (psta->rssi_stat.UndecoratedSmoothedPWDB << 16));
- }
- }
-
- for (i = 0; i < sta_cnt; i++) {
- if (PWDB_rssi[i] != (0)) {
- if (pHalData->fw_ractrl) {
- /* Report every sta's RSSI to FW */
- } else {
- ODM_RA_SetRSSI_8188E(
- &pHalData->odmpriv, (PWDB_rssi[i] & 0xFF), (u8)((PWDB_rssi[i] >> 16) & 0xFF));
- }
- }
- }
-
- if (tmpEntryMinPWDB != 0xff) /* If associated entry is found */
- pdmpriv->EntryMinUndecoratedSmoothedPWDB = tmpEntryMinPWDB;
- else
- pdmpriv->EntryMinUndecoratedSmoothedPWDB = 0;
-
- FindMinimumRSSI(Adapter);
- pHalData->odmpriv.RSSI_Min = pdmpriv->MinUndecoratedPWDBForDM;
-}
-
-static void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm)
-{
- pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = true;
-}
-
-static void odm_InitHybridAntDiv(struct odm_dm_struct *pDM_Odm)
-{
- if (!(pDM_Odm->SupportAbility & ODM_BB_ANT_DIV))
- return;
-
- ODM_AntennaDiversityInit_88E(pDM_Odm);
-}
-
-static void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm)
-{
- if (!(pDM_Odm->SupportAbility & ODM_BB_ANT_DIV))
- return;
-
- ODM_AntennaDiversity_88E(pDM_Odm);
-}
-
-static void ODM_EdcaTurboInit(struct odm_dm_struct *pDM_Odm)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = false;
- pDM_Odm->DM_EDCA_Table.bIsCurRDLState = false;
- Adapter->recvpriv.bIsAnyNonBEPkts = false;
-}
-
-static void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- u32 trafficIndex;
- u32 edca_param;
- u64 cur_tx_bytes = 0;
- u64 cur_rx_bytes = 0;
- u8 bbtchange = false;
- struct hal_data_8188e *pHalData = &Adapter->haldata;
- struct xmit_priv *pxmitpriv = &Adapter->xmitpriv;
- struct recv_priv *precvpriv = &Adapter->recvpriv;
- struct registry_priv *pregpriv = &Adapter->registrypriv;
- struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (pregpriv->wifi_spec == 1)
- goto dm_CheckEdcaTurbo_EXIT;
-
- if (pmlmeinfo->assoc_AP_vendor >= HT_IOT_PEER_MAX)
- goto dm_CheckEdcaTurbo_EXIT;
-
- /* Check if the status needs to be changed. */
- if ((bbtchange) || (!precvpriv->bIsAnyNonBEPkts)) {
- cur_tx_bytes = pxmitpriv->tx_bytes - pxmitpriv->last_tx_bytes;
- cur_rx_bytes = precvpriv->rx_bytes - precvpriv->last_rx_bytes;
-
- /* traffic, TX or RX */
- if ((pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_RALINK) ||
- (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_ATHEROS)) {
- if (cur_tx_bytes > (cur_rx_bytes << 2)) {
- /* Uplink TP is present. */
- trafficIndex = UP_LINK;
- } else {
- /* Balance TP is present. */
- trafficIndex = DOWN_LINK;
- }
- } else {
- if (cur_rx_bytes > (cur_tx_bytes << 2)) {
- /* Downlink TP is present. */
- trafficIndex = DOWN_LINK;
- } else {
- /* Balance TP is present. */
- trafficIndex = UP_LINK;
- }
- }
-
- if ((pDM_Odm->DM_EDCA_Table.prv_traffic_idx != trafficIndex) || (!pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA)) {
- if ((pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_CISCO) && (pmlmeext->cur_wireless_mode & WIRELESS_11_24N))
- edca_param = EDCAParam[pmlmeinfo->assoc_AP_vendor][trafficIndex];
- else
- edca_param = EDCAParam[HT_IOT_PEER_UNKNOWN][trafficIndex];
-
- rtw_write32(Adapter, REG_EDCA_BE_PARAM, edca_param);
-
- pDM_Odm->DM_EDCA_Table.prv_traffic_idx = trafficIndex;
- }
-
- pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = true;
- } else {
- /* Turn Off EDCA turbo here. */
- /* Restore original EDCA according to the declaration of AP. */
- if (pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA) {
- rtw_write32(Adapter, REG_EDCA_BE_PARAM, pHalData->AcParam_BE);
- pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = false;
- }
- }
-
-dm_CheckEdcaTurbo_EXIT:
- /* Set variables for next time. */
- precvpriv->bIsAnyNonBEPkts = false;
- pxmitpriv->last_tx_bytes = pxmitpriv->tx_bytes;
- precvpriv->last_rx_bytes = precvpriv->rx_bytes;
-}
-
-/* 2011/09/21 MH Added to describe the resources that different teams may need to allocate. */
-void ODM_DMInit(struct odm_dm_struct *pDM_Odm)
-{
- /* 2012.05.03 Luke: For all IC series */
- odm_CommonInfoSelfInit(pDM_Odm);
- odm_DIGInit(pDM_Odm);
- odm_RateAdaptiveMaskInit(pDM_Odm);
-
- odm_DynamicBBPowerSavingInit(pDM_Odm);
- odm_TXPowerTrackingThermalMeterInit(pDM_Odm);
- ODM_EdcaTurboInit(pDM_Odm);
- ODM_RAInfo_Init_all(pDM_Odm);
- if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
- (pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
- (pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
- odm_InitHybridAntDiv(pDM_Odm);
-}
-
-/* 2011/09/20 MH This is the entry point for all teams to execute HW outsourced DM. */
-/* You cannot add any dummy functions here; be careful, you may only use the DM structure */
-/* to perform any new ODM_DM. */
-void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm)
-{
- /* 2012.05.03 Luke: For all IC series */
- odm_CommonInfoSelfUpdate(pDM_Odm);
- odm_FalseAlarmCounterStatistics(pDM_Odm);
- odm_RSSIMonitorCheck(pDM_Odm);
-
- odm_DIG(pDM_Odm);
- odm_CCKPacketDetectionThresh(pDM_Odm);
-
- if (*pDM_Odm->pbPowerSaving)
- return;
-
- odm_RefreshRateAdaptiveMask(pDM_Odm);
-
- if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
- (pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
- (pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
- odm_HwAntDiv(pDM_Odm);
-
- ODM_TXPowerTrackingCheck(pDM_Odm);
- odm_EdcaTurboCheck(pDM_Odm);
-}
-
-/* Init /.. Fixed HW value. Only init time. */
-void ODM_CmnInfoInit(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def CmnInfo, u32 Value)
-{
- /* This section is used for init value */
- switch (CmnInfo) {
- /* Fixed ODM value. */
- case ODM_CMNINFO_MP_TEST_CHIP:
- pDM_Odm->bIsMPChip = (u8)Value;
- break;
- case ODM_CMNINFO_RF_ANTENNA_TYPE:
- pDM_Odm->AntDivType = (u8)Value;
- break;
- default:
- /* do nothing */
- break;
- }
-
- /* Tx power tracking BB swing table. */
- /* The base index is 12. Index n below 12 increases Tx power by ((12-n)/2) dB; index 13 and above decreases it by ((n-12)/2) dB. */
- pDM_Odm->BbSwingIdxOfdm = 12; /* Set default value as index 12. */
- pDM_Odm->BbSwingIdxOfdmCurrent = 12;
- pDM_Odm->BbSwingFlagOfdm = false;
-}
-
-void ODM_Write_DIG(struct odm_dm_struct *pDM_Odm, u8 CurrentIGI)
-{
- struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
- struct adapter *adapter = pDM_Odm->Adapter;
-
- if (pDM_DigTable->CurIGValue != CurrentIGI) {
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_IGI_A_11N, ODM_BIT_IGI_11N, CurrentIGI);
- pDM_DigTable->CurIGValue = CurrentIGI;
- }
-}
-
-void ODM_Write_CCK_CCA_Thres(struct odm_dm_struct *pDM_Odm, u8 CurCCK_CCAThres)
-{
- struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
-
- if (pDM_DigTable->CurCCK_CCAThres != CurCCK_CCAThres) /* modified by Guo.Mingzhi 2012-01-03 */
- rtw_write8(pDM_Odm->Adapter, ODM_REG_CCK_CCA_11N, CurCCK_CCAThres);
- pDM_DigTable->CurCCK_CCAThres = CurCCK_CCAThres;
-}
-
-void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal)
-{
- struct rtl_ps *pDM_PSTable = &pDM_Odm->DM_PSTable;
- struct adapter *adapter = pDM_Odm->Adapter;
- u8 Rssi_Up_bound = 30;
- u8 Rssi_Low_bound = 25;
-
- if (pDM_PSTable->initialize == 0) {
- pDM_PSTable->reg_874 = (rtl8188e_PHY_QueryBBReg(adapter, 0x874, bMaskDWord) & 0x1CC000) >> 14;
- pDM_PSTable->reg_c70 = (rtl8188e_PHY_QueryBBReg(adapter, 0xc70, bMaskDWord) & BIT(3)) >> 3;
- pDM_PSTable->reg_85c = (rtl8188e_PHY_QueryBBReg(adapter, 0x85c, bMaskDWord) & 0xFF000000) >> 24;
- pDM_PSTable->reg_a74 = (rtl8188e_PHY_QueryBBReg(adapter, 0xa74, bMaskDWord) & 0xF000) >> 12;
- pDM_PSTable->initialize = 1;
- }
-
- if (!bForceInNormal) {
- if (pDM_Odm->RSSI_Min != 0xFF) {
- if (pDM_PSTable->pre_rf_state == RF_Normal) {
- if (pDM_Odm->RSSI_Min >= Rssi_Up_bound)
- pDM_PSTable->cur_rf_state = RF_Save;
- else
- pDM_PSTable->cur_rf_state = RF_Normal;
- } else {
- if (pDM_Odm->RSSI_Min <= Rssi_Low_bound)
- pDM_PSTable->cur_rf_state = RF_Normal;
- else
- pDM_PSTable->cur_rf_state = RF_Save;
- }
- } else {
- pDM_PSTable->cur_rf_state = RF_MAX;
- }
- } else {
- pDM_PSTable->cur_rf_state = RF_Normal;
- }
-
- if (pDM_PSTable->pre_rf_state != pDM_PSTable->cur_rf_state) {
- if (pDM_PSTable->cur_rf_state == RF_Save) {
- rtl8188e_PHY_SetBBReg(adapter, 0x874, 0x1C0000, 0x2); /* Reg874[20:18]=3'b010 */
- rtl8188e_PHY_SetBBReg(adapter, 0xc70, BIT(3), 0); /* RegC70[3]=1'b0 */
- rtl8188e_PHY_SetBBReg(adapter, 0x85c, 0xFF000000, 0x63); /* Reg85C[31:24]=0x63 */
- rtl8188e_PHY_SetBBReg(adapter, 0x874, 0xC000, 0x2); /* Reg874[15:14]=2'b10 */
- rtl8188e_PHY_SetBBReg(adapter, 0xa74, 0xF000, 0x3); /* RegA75[7:4]=0x3 */
- rtl8188e_PHY_SetBBReg(adapter, 0x818, BIT(28), 0x0); /* Reg818[28]=1'b0 */
- rtl8188e_PHY_SetBBReg(adapter, 0x818, BIT(28), 0x1); /* Reg818[28]=1'b1 */
- } else {
- rtl8188e_PHY_SetBBReg(adapter, 0x874, 0x1CC000, pDM_PSTable->reg_874);
- rtl8188e_PHY_SetBBReg(adapter, 0xc70, BIT(3), pDM_PSTable->reg_c70);
- rtl8188e_PHY_SetBBReg(adapter, 0x85c, 0xFF000000, pDM_PSTable->reg_85c);
- rtl8188e_PHY_SetBBReg(adapter, 0xa74, 0xF000, pDM_PSTable->reg_a74);
- rtl8188e_PHY_SetBBReg(adapter, 0x818, BIT(28), 0x0);
- }
- pDM_PSTable->pre_rf_state = pDM_PSTable->cur_rf_state;
- }
-}
-
-u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid, u32 ra_mask, u8 rssi_level)
-{
- struct sta_info *pEntry;
- u32 rate_bitmap = 0x0fffffff;
- u8 WirelessMode;
-
- pEntry = pDM_Odm->pODM_StaInfo[macid];
- if (!IS_STA_VALID(pEntry))
- return ra_mask;
-
- WirelessMode = pEntry->wireless_mode;
-
- switch (WirelessMode) {
- case ODM_WM_B:
- if (ra_mask & 0x0000000c) /* 11M or 5.5M enable */
- rate_bitmap = 0x0000000d;
- else
- rate_bitmap = 0x0000000f;
- break;
- case (ODM_WM_B | ODM_WM_G):
- if (rssi_level == DM_RATR_STA_HIGH)
- rate_bitmap = 0x00000f00;
- else if (rssi_level == DM_RATR_STA_MIDDLE)
- rate_bitmap = 0x00000ff0;
- else
- rate_bitmap = 0x00000ff5;
- break;
- case (ODM_WM_B | ODM_WM_G | ODM_WM_N24G):
- if (rssi_level == DM_RATR_STA_HIGH) {
- rate_bitmap = 0x000f0000;
- } else if (rssi_level == DM_RATR_STA_MIDDLE) {
- rate_bitmap = 0x000ff000;
- } else {
- if (*pDM_Odm->pBandWidth == HT_CHANNEL_WIDTH_40)
- rate_bitmap = 0x000ff015;
- else
- rate_bitmap = 0x000ff005;
- }
- break;
- default:
- /* case WIRELESS_11_24N: */
- rate_bitmap = 0x0fffffff;
- break;
- }
-
- return rate_bitmap;
-}
-
-/* Return Value: bool */
-/* - true: RATRState is changed. */
-bool ODM_RAStateCheck(struct odm_dm_struct *pDM_Odm, s32 RSSI, bool bForceUpdate, u8 *pRATRState)
-{
- struct odm_rate_adapt *pRA = &pDM_Odm->RateAdaptive;
- const u8 GoUpGap = 5;
- u8 HighRSSIThreshForRA = pRA->HighRSSIThresh;
- u8 LowRSSIThreshForRA = pRA->LowRSSIThresh;
- u8 RATRState;
-
- /* Threshold Adjustment: */
- /* when RSSI state trends to go up one or two levels, make sure RSSI is high enough. */
- /* Here GoUpGap is added to solve the boundary's level alternation issue. */
- switch (*pRATRState) {
- case DM_RATR_STA_INIT:
- case DM_RATR_STA_HIGH:
- break;
- case DM_RATR_STA_MIDDLE:
- HighRSSIThreshForRA += GoUpGap;
- break;
- case DM_RATR_STA_LOW:
- HighRSSIThreshForRA += GoUpGap;
- LowRSSIThreshForRA += GoUpGap;
- break;
- default:
- break;
- }
-
- /* Decide RATRState by RSSI. */
- if (RSSI > HighRSSIThreshForRA)
- RATRState = DM_RATR_STA_HIGH;
- else if (RSSI > LowRSSIThreshForRA)
- RATRState = DM_RATR_STA_MIDDLE;
- else
- RATRState = DM_RATR_STA_LOW;
-
- if (*pRATRState != RATRState || bForceUpdate) {
- *pRATRState = RATRState;
- return true;
- }
- return false;
-}
-
-void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
-
- if (!pDM_Odm->RFCalibrateInfo.TM_Trigger) { /* at least delay 1 sec */
- rtl8188e_PHY_SetRFReg(Adapter, RF_T_METER_88E, BIT(17) | BIT(16), 0x03);
-
- pDM_Odm->RFCalibrateInfo.TM_Trigger = 1;
- return;
- } else {
- odm_TXPowerTrackingCallback_ThermalMeter_8188E(Adapter);
- pDM_Odm->RFCalibrateInfo.TM_Trigger = 0;
- }
-}
diff --git a/drivers/staging/r8188eu/hal/odm_HWConfig.c b/drivers/staging/r8188eu/hal/odm_HWConfig.c
deleted file mode 100644
index 38f357e8aeda..000000000000
--- a/drivers/staging/r8188eu/hal/odm_HWConfig.c
+++ /dev/null
@@ -1,349 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#include "../include/drv_types.h"
-
-static u8 odm_query_rxpwrpercentage(s8 antpower)
-{
- if ((antpower <= -100) || (antpower >= 20))
- return 0;
- else if (antpower >= 0)
- return 100;
- else
- return 100 + antpower;
-}
-
-static s32 odm_signal_scale_mapping(struct odm_dm_struct *dm_odm, s32 currsig)
-{
- s32 retsig;
-
- if (currsig >= 51 && currsig <= 100)
- retsig = 100;
- else if (currsig >= 41 && currsig <= 50)
- retsig = 80 + ((currsig - 40) * 2);
- else if (currsig >= 31 && currsig <= 40)
- retsig = 66 + (currsig - 30);
- else if (currsig >= 21 && currsig <= 30)
- retsig = 54 + (currsig - 20);
- else if (currsig >= 10 && currsig <= 20)
- retsig = 42 + (((currsig - 10) * 2) / 3);
- else if (currsig >= 5 && currsig <= 9)
- retsig = 22 + (((currsig - 5) * 3) / 2);
- else if (currsig >= 1 && currsig <= 4)
- retsig = 6 + (((currsig - 1) * 3) / 2);
- else
- retsig = currsig;
-
- return retsig;
-}
-
-static u8 odm_evm_db_to_percentage(s8 value)
-{
- /* -33dB~0dB to 0%~99% */
- s8 ret_val = clamp(-value, 0, 33) * 3;
-
- if (ret_val == 99)
- ret_val = 100;
-
- return ret_val;
-}
-
-static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
- struct phy_info *pPhyInfo,
- u8 *pPhyStatus,
- struct odm_per_pkt_info *pPktinfo,
- struct adapter *adapt)
-{
- u8 i, Max_spatial_stream;
- s8 rx_pwr[4], rx_pwr_all = 0;
- u8 EVM, PWDB_ALL = 0;
- u8 RSSI, total_rssi = 0;
- u8 isCCKrate = 0;
- u8 rf_rx_num = 0;
- u8 cck_highpwr = 0;
- u8 LNA_idx, VGA_idx;
-
- struct phy_status_rpt *pPhyStaRpt = (struct phy_status_rpt *)pPhyStatus;
-
- isCCKrate = pPktinfo->Rate >= DESC92C_RATE1M && pPktinfo->Rate <= DESC92C_RATE11M;
-
- if (isCCKrate) {
- u8 cck_agc_rpt;
-
- /* (1)Hardware does not provide RSSI for CCK */
- /* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
-
- cck_highpwr = dm_odm->bCckHighPower;
-
- cck_agc_rpt = pPhyStaRpt->cck_agc_rpt_ofdm_cfosho_a;
-
- /* 2011.11.28 LukeLee: 88E uses a different LNA & VGA gain table */
- /* The RSSI formula should be modified according to the gain table */
- /* In 88E, cck_highpwr is always set to 1 */
- LNA_idx = ((cck_agc_rpt & 0xE0) >> 5);
- VGA_idx = (cck_agc_rpt & 0x1F);
- switch (LNA_idx) {
- case 7:
- if (VGA_idx <= 27)
- rx_pwr_all = -100 + 2 * (27 - VGA_idx); /* VGA_idx = 27~2 */
- else
- rx_pwr_all = -100;
- break;
- case 6:
- rx_pwr_all = -48 + 2 * (2 - VGA_idx); /* VGA_idx = 2~0 */
- break;
- case 5:
- rx_pwr_all = -42 + 2 * (7 - VGA_idx); /* VGA_idx = 7~5 */
- break;
- case 4:
- rx_pwr_all = -36 + 2 * (7 - VGA_idx); /* VGA_idx = 7~4 */
- break;
- case 3:
- rx_pwr_all = -24 + 2 * (7 - VGA_idx); /* VGA_idx = 7~0 */
- break;
- case 2:
- if (cck_highpwr)
- rx_pwr_all = -12 + 2 * (5 - VGA_idx); /* VGA_idx = 5~0 */
- else
- rx_pwr_all = -6 + 2 * (5 - VGA_idx);
- break;
- case 1:
- rx_pwr_all = 8 - 2 * VGA_idx;
- break;
- case 0:
- rx_pwr_all = 14 - 2 * VGA_idx;
- break;
- default:
- break;
- }
- rx_pwr_all += 6;
- PWDB_ALL = odm_query_rxpwrpercentage(rx_pwr_all);
- if (!cck_highpwr) {
- if (PWDB_ALL >= 80)
- PWDB_ALL = ((PWDB_ALL - 80) << 1) + ((PWDB_ALL - 80) >> 1) + 80;
- else if ((PWDB_ALL <= 78) && (PWDB_ALL >= 20))
- PWDB_ALL += 3;
- if (PWDB_ALL > 100)
- PWDB_ALL = 100;
- }
-
- pPhyInfo->RxPWDBAll = PWDB_ALL;
- pPhyInfo->recvpower = rx_pwr_all;
- /* (3) Get Signal Quality (EVM) */
- if (pPktinfo->bPacketMatchBSSID) {
- u8 SQ, SQ_rpt;
-
- if (pPhyInfo->RxPWDBAll > 40) {
- SQ = 100;
- } else {
- SQ_rpt = pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all;
-
- if (SQ_rpt > 64)
- SQ = 0;
- else if (SQ_rpt < 20)
- SQ = 100;
- else
- SQ = ((64 - SQ_rpt) * 100) / 44;
- }
- pPhyInfo->SignalQuality = SQ;
- }
- } else { /* is OFDM rate */
- /* (1)Get RSSI for HT rate */
-
- for (i = RF_PATH_A; i < RF_PATH_MAX; i++) {
- /* 2008/01/30 MH we will judge RF RX path now. */
- if (dm_odm->RFPathRxEnable & BIT(i))
- rf_rx_num++;
-
- rx_pwr[i] = ((pPhyStaRpt->path_agc[i].gain & 0x3F) * 2) - 110;
- if (i == RF_PATH_A)
- adapt->signal_strength = rx_pwr[i];
-
- pPhyInfo->RxPwr[i] = rx_pwr[i];
-
- /* Translate DBM to percentage. */
- RSSI = odm_query_rxpwrpercentage(rx_pwr[i]);
- total_rssi += RSSI;
-
- pPhyInfo->RxMIMOSignalStrength[i] = (u8)RSSI;
-
- /* Get Rx snr value in DB */
- dm_odm->PhyDbgInfo.RxSNRdB[i] = (s32)(pPhyStaRpt->path_rxsnr[i] / 2);
- }
- /* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
- rx_pwr_all = (((pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all) >> 1) & 0x7f) - 110;
-
- PWDB_ALL = odm_query_rxpwrpercentage(rx_pwr_all);
-
- pPhyInfo->RxPWDBAll = PWDB_ALL;
- pPhyInfo->RxPower = rx_pwr_all;
- pPhyInfo->recvpower = rx_pwr_all;
-
- /* (3)EVM of HT rate */
- if (pPktinfo->Rate >= DESC92C_RATEMCS8 && pPktinfo->Rate <= DESC92C_RATEMCS15)
- Max_spatial_stream = 2; /* both spatial stream make sense */
- else
- Max_spatial_stream = 1; /* only spatial stream 1 makes sense */
-
- for (i = 0; i < Max_spatial_stream; i++) {
- /* Do not use a shift operation like "rx_evmX >>= 1", because the compiler of the free build environment */
- /* fills the most significant bit with "zero" when shifting, which may change a negative */
- /* value to a positive one; the dBm value (which is supposed to be negative) would then no longer be correct. */
- EVM = odm_evm_db_to_percentage((pPhyStaRpt->stream_rxevm[i])); /* dbm */
-
- if (pPktinfo->bPacketMatchBSSID) {
- if (i == RF_PATH_A) /* Fill value in RFD, Get the first spatial stream only */
- pPhyInfo->SignalQuality = (u8)(EVM & 0xff);
- }
- }
- }
- /* UI BSS list signal strength (in percentage); make it look good, ranging from 0 to 100. */
- /* It is assigned to the BSS List in GetValueFromBeaconOrProbeRsp(). */
- if (isCCKrate) {
- pPhyInfo->SignalStrength = (u8)(odm_signal_scale_mapping(dm_odm, PWDB_ALL));/* PWDB_ALL; */
- } else {
- if (rf_rx_num != 0)
- pPhyInfo->SignalStrength = (u8)(odm_signal_scale_mapping(dm_odm, total_rssi /= rf_rx_num));
- }
-
- /* For 88E HW Antenna Diversity */
- dm_odm->DM_FatTable.antsel_rx_keep_0 = pPhyStaRpt->ant_sel;
- dm_odm->DM_FatTable.antsel_rx_keep_1 = pPhyStaRpt->ant_sel_b;
- dm_odm->DM_FatTable.antsel_rx_keep_2 = pPhyStaRpt->antsel_rx_keep_2;
-}
-
-static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
- struct phy_info *pPhyInfo,
- struct odm_per_pkt_info *pPktinfo)
-{
- s32 UndecoratedSmoothedPWDB, UndecoratedSmoothedCCK;
- s32 UndecoratedSmoothedOFDM, RSSI_Ave;
- u8 isCCKrate = 0;
- u8 RSSI_max, RSSI_min, i;
- u32 OFDM_pkt = 0;
- u32 Weighting = 0;
- struct sta_info *pEntry;
- u8 antsel_tr_mux;
- struct fast_ant_train *pDM_FatTable = &dm_odm->DM_FatTable;
-
- if (pPktinfo->StationID == 0xFF)
- return;
- pEntry = dm_odm->pODM_StaInfo[pPktinfo->StationID];
- if (!IS_STA_VALID(pEntry))
- return;
- if ((!pPktinfo->bPacketMatchBSSID))
- return;
-
- isCCKrate = pPktinfo->Rate >= DESC92C_RATE1M && pPktinfo->Rate <= DESC92C_RATE11M;
-
- /* Smart Antenna Debug Message------------------ */
- if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV)) {
- if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon) {
- antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2 << 2) |
- (pDM_FatTable->antsel_rx_keep_1 << 1) | pDM_FatTable->antsel_rx_keep_0;
- ODM_AntselStatistics_88E(dm_odm, antsel_tr_mux, pPktinfo->StationID, pPhyInfo->RxPWDBAll);
- }
- }
-
- /* Smart Antenna Debug Message------------------ */
-
- UndecoratedSmoothedCCK = pEntry->rssi_stat.UndecoratedSmoothedCCK;
- UndecoratedSmoothedOFDM = pEntry->rssi_stat.UndecoratedSmoothedOFDM;
- UndecoratedSmoothedPWDB = pEntry->rssi_stat.UndecoratedSmoothedPWDB;
-
- if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon) {
- if (!isCCKrate) { /* ofdm rate */
- if (pPhyInfo->RxMIMOSignalStrength[RF_PATH_B] == 0) {
- RSSI_Ave = pPhyInfo->RxMIMOSignalStrength[RF_PATH_A];
- } else {
- if (pPhyInfo->RxMIMOSignalStrength[RF_PATH_A] > pPhyInfo->RxMIMOSignalStrength[RF_PATH_B]) {
- RSSI_max = pPhyInfo->RxMIMOSignalStrength[RF_PATH_A];
- RSSI_min = pPhyInfo->RxMIMOSignalStrength[RF_PATH_B];
- } else {
- RSSI_max = pPhyInfo->RxMIMOSignalStrength[RF_PATH_B];
- RSSI_min = pPhyInfo->RxMIMOSignalStrength[RF_PATH_A];
- }
- if ((RSSI_max - RSSI_min) < 3)
- RSSI_Ave = RSSI_max;
- else if ((RSSI_max - RSSI_min) < 6)
- RSSI_Ave = RSSI_max - 1;
- else if ((RSSI_max - RSSI_min) < 10)
- RSSI_Ave = RSSI_max - 2;
- else
- RSSI_Ave = RSSI_max - 3;
- }
-
- /* 1 Process OFDM RSSI */
- if (UndecoratedSmoothedOFDM <= 0) { /* initialize */
- UndecoratedSmoothedOFDM = pPhyInfo->RxPWDBAll;
- } else {
- if (pPhyInfo->RxPWDBAll > (u32)UndecoratedSmoothedOFDM) {
- UndecoratedSmoothedOFDM =
- (((UndecoratedSmoothedOFDM) * (Rx_Smooth_Factor - 1)) +
- (RSSI_Ave)) / (Rx_Smooth_Factor);
- UndecoratedSmoothedOFDM = UndecoratedSmoothedOFDM + 1;
- } else {
- UndecoratedSmoothedOFDM =
- (((UndecoratedSmoothedOFDM) * (Rx_Smooth_Factor - 1)) +
- (RSSI_Ave)) / (Rx_Smooth_Factor);
- }
- }
-
- pEntry->rssi_stat.PacketMap = (pEntry->rssi_stat.PacketMap << 1) | BIT(0);
-
- } else {
- RSSI_Ave = pPhyInfo->RxPWDBAll;
-
- /* 1 Process CCK RSSI */
- if (UndecoratedSmoothedCCK <= 0) { /* initialize */
- UndecoratedSmoothedCCK = pPhyInfo->RxPWDBAll;
- } else {
- if (pPhyInfo->RxPWDBAll > (u32)UndecoratedSmoothedCCK) {
- UndecoratedSmoothedCCK =
- ((UndecoratedSmoothedCCK * (Rx_Smooth_Factor - 1)) +
- pPhyInfo->RxPWDBAll) / Rx_Smooth_Factor;
- UndecoratedSmoothedCCK = UndecoratedSmoothedCCK + 1;
- } else {
- UndecoratedSmoothedCCK =
- ((UndecoratedSmoothedCCK * (Rx_Smooth_Factor - 1)) +
- pPhyInfo->RxPWDBAll) / Rx_Smooth_Factor;
- }
- }
- pEntry->rssi_stat.PacketMap = pEntry->rssi_stat.PacketMap << 1;
- }
- /* 2011.07.28 LukeLee: modified to prevent unstable CCK RSSI */
- if (pEntry->rssi_stat.ValidBit >= 64)
- pEntry->rssi_stat.ValidBit = 64;
- else
- pEntry->rssi_stat.ValidBit++;
-
- for (i = 0; i < pEntry->rssi_stat.ValidBit; i++)
- OFDM_pkt += (u8)(pEntry->rssi_stat.PacketMap >> i) & BIT(0);
-
- if (pEntry->rssi_stat.ValidBit == 64) {
- Weighting = ((OFDM_pkt << 4) > 64) ? 64 : (OFDM_pkt << 4);
- UndecoratedSmoothedPWDB = (Weighting * UndecoratedSmoothedOFDM + (64 - Weighting) * UndecoratedSmoothedCCK) >> 6;
- } else {
- if (pEntry->rssi_stat.ValidBit != 0)
- UndecoratedSmoothedPWDB = (OFDM_pkt * UndecoratedSmoothedOFDM +
- (pEntry->rssi_stat.ValidBit - OFDM_pkt) *
- UndecoratedSmoothedCCK) / pEntry->rssi_stat.ValidBit;
- else
- UndecoratedSmoothedPWDB = 0;
- }
- pEntry->rssi_stat.UndecoratedSmoothedCCK = UndecoratedSmoothedCCK;
- pEntry->rssi_stat.UndecoratedSmoothedOFDM = UndecoratedSmoothedOFDM;
- pEntry->rssi_stat.UndecoratedSmoothedPWDB = UndecoratedSmoothedPWDB;
- }
-}
-
-/* Endianness before calling this API */
-void ODM_PhyStatusQuery(struct odm_dm_struct *dm_odm,
- struct phy_info *pPhyInfo,
- u8 *pPhyStatus,
- struct odm_per_pkt_info *pPktinfo,
- struct adapter *adapt)
-{
- odm_RxPhyStatus92CSeries_Parsing(dm_odm, pPhyInfo, pPhyStatus, pPktinfo, adapt);
- odm_Process_RSSIForDM(dm_odm, pPhyInfo, pPktinfo);
-}
diff --git a/drivers/staging/r8188eu/hal/odm_RTL8188E.c b/drivers/staging/r8188eu/hal/odm_RTL8188E.c
deleted file mode 100644
index f3f4074d4316..000000000000
--- a/drivers/staging/r8188eu/hal/odm_RTL8188E.c
+++ /dev/null
@@ -1,264 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#include "../include/drv_types.h"
-
-static void odm_RX_HWAntDivInit(struct odm_dm_struct *dm_odm)
-{
- struct adapter *adapter = dm_odm->Adapter;
- u32 value32;
-
- /* MAC Setting */
- value32 = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32 | (BIT(23) | BIT(25))); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
- /* Pin Settings */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_PIN_CTRL_11N, BIT(9) | BIT(8), 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT(10), 0); /* Reg864[10]=1'b0 antsel2 by HW */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_LNA_SWITCH_11N, BIT(22), 1); /* Regb2c[22]=1'b0 disable CS/CG switch */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_LNA_SWITCH_11N, BIT(31), 1); /* Regb2c[31]=1'b1 output at CG only */
- /* OFDM Settings */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_ANTDIV_PARA1_11N, bMaskDWord, 0x000000a0);
- /* CCK Settings */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_BB_PWR_SAV4_11N, BIT(7), 1); /* Fix CCK PHY status report issue */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_CCK_ANTDIV_PARA2_11N, BIT(4), 1); /* CCK complete HW AntDiv within 64 samples */
- ODM_UpdateRxIdleAnt_88E(dm_odm, MAIN_ANT);
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_ANT_MAPPING1_11N, 0xFFFF, 0x0201); /* antenna mapping table */
-}
-
-static void odm_TRX_HWAntDivInit(struct odm_dm_struct *dm_odm)
-{
- struct adapter *adapter = dm_odm->Adapter;
- u32 value32;
-
- /* MAC Setting */
- value32 = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32 | (BIT(23) | BIT(25))); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
- /* Pin Settings */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_PIN_CTRL_11N, BIT(9) | BIT(8), 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT(10), 0); /* Reg864[10]=1'b0 antsel2 by HW */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_LNA_SWITCH_11N, BIT(22), 0); /* Regb2c[22]=1'b0 disable CS/CG switch */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_LNA_SWITCH_11N, BIT(31), 1); /* Regb2c[31]=1'b1 output at CG only */
- /* OFDM Settings */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_ANTDIV_PARA1_11N, bMaskDWord, 0x000000a0);
- /* CCK Settings */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_BB_PWR_SAV4_11N, BIT(7), 1); /* Fix CCK PHY status report issue */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_CCK_ANTDIV_PARA2_11N, BIT(4), 1); /* CCK complete HW AntDiv within 64 samples */
- /* Tx Settings */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_TX_ANT_CTRL_11N, BIT(21), 0); /* Reg80c[21]=1'b0 from TX Reg */
- ODM_UpdateRxIdleAnt_88E(dm_odm, MAIN_ANT);
-
- /* antenna mapping table */
- if (!dm_odm->bIsMPChip) { /* testchip */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_DEFUALT_A_11N, BIT(10) | BIT(9) | BIT(8), 1); /* Reg858[10:8]=3'b001 */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_DEFUALT_A_11N, BIT(13) | BIT(12) | BIT(11), 2); /* Reg858[13:11]=3'b010 */
- } else { /* MPchip */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_ANT_MAPPING1_11N, bMaskDWord, 0x0201); /* Reg914=3'b010, Reg915=3'b001 */
- }
-}
-
-static void odm_FastAntTrainingInit(struct odm_dm_struct *dm_odm)
-{
- struct adapter *adapter = dm_odm->Adapter;
- u32 value32;
-
- /* MAC Setting */
- value32 = rtl8188e_PHY_QueryBBReg(adapter, 0x4c, bMaskDWord);
- rtl8188e_PHY_SetBBReg(adapter, 0x4c, bMaskDWord, value32 | (BIT(23) | BIT(25))); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
- value32 = rtl8188e_PHY_QueryBBReg(adapter, 0x7B4, bMaskDWord);
- rtl8188e_PHY_SetBBReg(adapter, 0x7b4, bMaskDWord, value32 | (BIT(16) | BIT(17))); /* Reg7B4[16]=1 enable antenna training, Reg7B4[17]=1 enable A2 match */
-
- /* Match MAC ADDR */
- rtl8188e_PHY_SetBBReg(adapter, 0x7b4, 0xFFFF, 0);
- rtl8188e_PHY_SetBBReg(adapter, 0x7b0, bMaskDWord, 0);
-
- rtl8188e_PHY_SetBBReg(adapter, 0x870, BIT(9) | BIT(8), 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
- rtl8188e_PHY_SetBBReg(adapter, 0x864, BIT(10), 0); /* Reg864[10]=1'b0 antsel2 by HW */
- rtl8188e_PHY_SetBBReg(adapter, 0xb2c, BIT(22), 0); /* Regb2c[22]=1'b0 disable CS/CG switch */
- rtl8188e_PHY_SetBBReg(adapter, 0xb2c, BIT(31), 1); /* Regb2c[31]=1'b1 output at CG only */
- rtl8188e_PHY_SetBBReg(adapter, 0xca4, bMaskDWord, 0x000000a0);
-
- if (!dm_odm->bIsMPChip) { /* testchip */
- rtl8188e_PHY_SetBBReg(adapter, 0x858, BIT(10) | BIT(9) | BIT(8), 1); /* Reg858[10:8]=3'b001 */
- rtl8188e_PHY_SetBBReg(adapter, 0x858, BIT(13) | BIT(12) | BIT(11), 2); /* Reg858[13:11]=3'b010 */
- } else { /* MPchip */
- rtl8188e_PHY_SetBBReg(adapter, 0x914, bMaskByte0, 1);
- rtl8188e_PHY_SetBBReg(adapter, 0x914, bMaskByte1, 2);
- }
-
- /* Default Ant Setting when no fast training */
- rtl8188e_PHY_SetBBReg(adapter, 0x80c, BIT(21), 1); /* Reg80c[21]=1'b1 from TX Info */
- rtl8188e_PHY_SetBBReg(adapter, 0x864, BIT(5) | BIT(4) | BIT(3), 0); /* Default RX */
- rtl8188e_PHY_SetBBReg(adapter, 0x864, BIT(8) | BIT(7) | BIT(6), 1); /* Optional RX */
-
- /* Enter Training state */
- rtl8188e_PHY_SetBBReg(adapter, 0x864, BIT(2) | BIT(1) | BIT(0), 1);
- rtl8188e_PHY_SetBBReg(adapter, 0xc50, BIT(7), 1); /* RegC50[7]=1'b1 enable HW AntDiv */
-}
-
-void ODM_AntennaDiversityInit_88E(struct odm_dm_struct *dm_odm)
-{
- if (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV)
- odm_RX_HWAntDivInit(dm_odm);
- else if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
- odm_TRX_HWAntDivInit(dm_odm);
- else if (dm_odm->AntDivType == CG_TRX_SMART_ANTDIV)
- odm_FastAntTrainingInit(dm_odm);
-}
-
-void ODM_UpdateRxIdleAnt_88E(struct odm_dm_struct *dm_odm, u8 Ant)
-{
- struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
- struct adapter *adapter = dm_odm->Adapter;
- u32 DefaultAnt, OptionalAnt;
-
- if (dm_fat_tbl->RxIdleAnt != Ant) {
- if (Ant == MAIN_ANT) {
- DefaultAnt = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ? MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX;
- OptionalAnt = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ? AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX;
- } else {
- DefaultAnt = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ? AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX;
- OptionalAnt = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ? MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX;
- }
-
- if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) {
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT(5) | BIT(4) | BIT(3), DefaultAnt); /* Default RX */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT(8) | BIT(7) | BIT(6), OptionalAnt); /* Optional RX */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_ANTSEL_CTRL_11N, BIT(14) | BIT(13) | BIT(12), DefaultAnt); /* Default TX */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RESP_TX_11N, BIT(6) | BIT(7), DefaultAnt); /* Resp Tx */
- } else if (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV) {
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT(5) | BIT(4) | BIT(3), DefaultAnt); /* Default RX */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT(8) | BIT(7) | BIT(6), OptionalAnt); /* Optional RX */
- }
- }
- dm_fat_tbl->RxIdleAnt = Ant;
- if (Ant != MAIN_ANT)
- pr_info("RxIdleAnt=AUX_ANT\n");
-}
-
-static void odm_UpdateTxAnt_88E(struct odm_dm_struct *dm_odm, u8 Ant, u32 MacId)
-{
- struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
- u8 TargetAnt;
-
- if (Ant == MAIN_ANT)
- TargetAnt = MAIN_ANT_CG_TRX;
- else
- TargetAnt = AUX_ANT_CG_TRX;
- dm_fat_tbl->antsel_a[MacId] = TargetAnt & BIT(0);
- dm_fat_tbl->antsel_b[MacId] = (TargetAnt & BIT(1)) >> 1;
- dm_fat_tbl->antsel_c[MacId] = (TargetAnt & BIT(2)) >> 2;
-}
-
-void ODM_SetTxAntByTxInfo_88E(struct odm_dm_struct *dm_odm, u8 *pDesc, u8 macId)
-{
- struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
-
- if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CG_TRX_SMART_ANTDIV)) {
- SET_TX_DESC_ANTSEL_A_88E(pDesc, dm_fat_tbl->antsel_a[macId]);
- SET_TX_DESC_ANTSEL_B_88E(pDesc, dm_fat_tbl->antsel_b[macId]);
- SET_TX_DESC_ANTSEL_C_88E(pDesc, dm_fat_tbl->antsel_c[macId]);
- }
-}
-
-void ODM_AntselStatistics_88E(struct odm_dm_struct *dm_odm, u8 antsel_tr_mux, u32 MacId, u8 RxPWDBAll)
-{
- struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
- if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) {
- if (antsel_tr_mux == MAIN_ANT_CG_TRX) {
- dm_fat_tbl->MainAnt_Sum[MacId] += RxPWDBAll;
- dm_fat_tbl->MainAnt_Cnt[MacId]++;
- } else {
- dm_fat_tbl->AuxAnt_Sum[MacId] += RxPWDBAll;
- dm_fat_tbl->AuxAnt_Cnt[MacId]++;
- }
- } else if (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV) {
- if (antsel_tr_mux == MAIN_ANT_CGCS_RX) {
- dm_fat_tbl->MainAnt_Sum[MacId] += RxPWDBAll;
- dm_fat_tbl->MainAnt_Cnt[MacId]++;
- } else {
- dm_fat_tbl->AuxAnt_Sum[MacId] += RxPWDBAll;
- dm_fat_tbl->AuxAnt_Cnt[MacId]++;
- }
- }
-}
-
-static void odm_HWAntDiv(struct odm_dm_struct *dm_odm)
-{
- u32 i, MinRSSI = 0xFF, AntDivMaxRSSI = 0, MaxRSSI = 0, LocalMinRSSI, LocalMaxRSSI;
- u32 Main_RSSI, Aux_RSSI;
- u8 RxIdleAnt = 0, TargetAnt = 7;
- struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
- struct rtw_dig *pDM_DigTable = &dm_odm->DM_DigTable;
- struct sta_info *pEntry;
-
- for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
- pEntry = dm_odm->pODM_StaInfo[i];
- if (IS_STA_VALID(pEntry)) {
- /* 2 Calculate RSSI per Antenna */
- Main_RSSI = (dm_fat_tbl->MainAnt_Cnt[i] != 0) ? (dm_fat_tbl->MainAnt_Sum[i] / dm_fat_tbl->MainAnt_Cnt[i]) : 0;
- Aux_RSSI = (dm_fat_tbl->AuxAnt_Cnt[i] != 0) ? (dm_fat_tbl->AuxAnt_Sum[i] / dm_fat_tbl->AuxAnt_Cnt[i]) : 0;
- TargetAnt = (Main_RSSI >= Aux_RSSI) ? MAIN_ANT : AUX_ANT;
- /* 2 Select MaxRSSI for DIG */
- LocalMaxRSSI = max(Main_RSSI, Aux_RSSI);
- if ((LocalMaxRSSI > AntDivMaxRSSI) && (LocalMaxRSSI < 40))
- AntDivMaxRSSI = LocalMaxRSSI;
- if (LocalMaxRSSI > MaxRSSI)
- MaxRSSI = LocalMaxRSSI;
-
- /* 2 Select RX Idle Antenna */
- if ((dm_fat_tbl->RxIdleAnt == MAIN_ANT) && (Main_RSSI == 0))
- Main_RSSI = Aux_RSSI;
- else if ((dm_fat_tbl->RxIdleAnt == AUX_ANT) && (Aux_RSSI == 0))
- Aux_RSSI = Main_RSSI;
-
- LocalMinRSSI = min(Main_RSSI, Aux_RSSI);
- if (LocalMinRSSI < MinRSSI) {
- MinRSSI = LocalMinRSSI;
- RxIdleAnt = TargetAnt;
- }
- /* 2 Select TRX Antenna */
- if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
- odm_UpdateTxAnt_88E(dm_odm, TargetAnt, i);
- }
- dm_fat_tbl->MainAnt_Sum[i] = 0;
- dm_fat_tbl->AuxAnt_Sum[i] = 0;
- dm_fat_tbl->MainAnt_Cnt[i] = 0;
- dm_fat_tbl->AuxAnt_Cnt[i] = 0;
- }
-
- /* 2 Set RX Idle Antenna */
- ODM_UpdateRxIdleAnt_88E(dm_odm, RxIdleAnt);
-
- pDM_DigTable->AntDiv_RSSI_max = AntDivMaxRSSI;
- pDM_DigTable->RSSI_max = MaxRSSI;
-}
-
-void ODM_AntennaDiversity_88E(struct odm_dm_struct *dm_odm)
-{
- struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
- struct adapter *adapter = dm_odm->Adapter;
-
- if (!(dm_odm->SupportAbility & ODM_BB_ANT_DIV))
- return;
- if (!dm_odm->bLinked) {
- if (dm_fat_tbl->bBecomeLinked) {
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_IGI_A_11N, BIT(7), 0); /* RegC50[7]=1'b0 disable HW AntDiv */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_CCK_ANTDIV_PARA1_11N, BIT(15), 0); /* Disable CCK AntDiv */
- if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_TX_ANT_CTRL_11N, BIT(21), 0); /* Reg80c[21]=1'b0 from TX Reg */
- dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
- }
- return;
- } else {
- if (!dm_fat_tbl->bBecomeLinked) {
- /* Because HW AntDiv is disabled before Link, we enable HW AntDiv after link */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_IGI_A_11N, BIT(7), 1); /* RegC50[7]=1'b1 enable HW AntDiv */
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_CCK_ANTDIV_PARA1_11N, BIT(15), 1); /* Enable CCK AntDiv */
- if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
- rtl8188e_PHY_SetBBReg(adapter, ODM_REG_TX_ANT_CTRL_11N, BIT(21), 1); /* Reg80c[21]=1'b1 from TX Info */
- dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
- }
- }
- if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV))
- odm_HWAntDiv(dm_odm);
-}
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_cmd.c b/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
deleted file mode 100644
index 788904d4655c..000000000000
--- a/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
+++ /dev/null
@@ -1,694 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _RTL8188E_CMD_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/rtw_ioctl_set.h"
-
-#include "../include/rtl8188e_hal.h"
-
-#define RTL88E_MAX_H2C_BOX_NUMS 4
-#define RTL88E_MAX_CMD_LEN 7
-#define RTL88E_MESSAGE_BOX_SIZE 4
-#define RTL88E_EX_MESSAGE_BOX_SIZE 4
-
-static u8 _is_fw_read_cmd_down(struct adapter *adapt, u8 msgbox_num)
-{
- u8 read_down = false, reg;
- int retry_cnts = 100;
- int res;
-
- u8 valid;
-
- do {
- res = rtw_read8(adapt, REG_HMETFR, &reg);
- if (res)
- continue;
-
- valid = reg & BIT(msgbox_num);
- if (0 == valid)
- read_down = true;
- } while ((!read_down) && (retry_cnts--));
-
- return read_down;
-}
-
-/*****************************************
-* H2C Msg format :
-* 0x1DF - 0x1D0
-*| 31 - 8 | 7-5 4 - 0 |
-*| h2c_msg |Class_ID CMD_ID |
-*
-* Extend 0x1FF - 0x1F0
-*|31 - 0 |
-*|ext_msg|
-******************************************/
-static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *pCmdBuffer)
-{
- u8 bcmd_down = false;
- s32 retry_cnts = 100;
- u8 h2c_box_num;
- u32 msgbox_addr;
- u32 msgbox_ex_addr;
- struct hal_data_8188e *haldata = &adapt->haldata;
- u8 cmd_idx, ext_cmd_len;
- u32 h2c_cmd = 0;
- u32 h2c_cmd_ex = 0;
-
- if (!adapt->bFWReady)
- return _FAIL;
-
- if (!pCmdBuffer || CmdLen > RTL88E_MAX_CMD_LEN || adapt->bSurpriseRemoved)
- return _FAIL;
-
- /* Pay attention to whether a race condition happens in H2C cmd setting. */
- do {
- h2c_box_num = haldata->LastHMEBoxNum;
-
- if (!_is_fw_read_cmd_down(adapt, h2c_box_num))
- return _FAIL;
-
- *(u8 *)(&h2c_cmd) = ElementID;
-
- if (CmdLen <= 3) {
- memcpy((u8 *)(&h2c_cmd) + 1, pCmdBuffer, CmdLen);
- } else {
- memcpy((u8 *)(&h2c_cmd) + 1, pCmdBuffer, 3);
- ext_cmd_len = CmdLen - 3;
- memcpy((u8 *)(&h2c_cmd_ex), pCmdBuffer + 3, ext_cmd_len);
-
- /* Write Ext command */
- msgbox_ex_addr = REG_HMEBOX_EXT_0 + (h2c_box_num * RTL88E_EX_MESSAGE_BOX_SIZE);
- for (cmd_idx = 0; cmd_idx < ext_cmd_len; cmd_idx++) {
- rtw_write8(adapt, msgbox_ex_addr + cmd_idx, *((u8 *)(&h2c_cmd_ex) + cmd_idx));
- }
- }
- /* Write command */
- msgbox_addr = REG_HMEBOX_0 + (h2c_box_num * RTL88E_MESSAGE_BOX_SIZE);
- for (cmd_idx = 0; cmd_idx < RTL88E_MESSAGE_BOX_SIZE; cmd_idx++) {
- rtw_write8(adapt, msgbox_addr + cmd_idx, *((u8 *)(&h2c_cmd) + cmd_idx));
- }
- bcmd_down = true;
-
- haldata->LastHMEBoxNum = (h2c_box_num + 1) % RTL88E_MAX_H2C_BOX_NUMS;
-
- } while ((!bcmd_down) && (retry_cnts--));
-
- return _SUCCESS;
-}
-
-u8 rtl8188e_set_raid_cmd(struct adapter *adapt, u32 mask)
-{
- u8 buf[3];
- u8 res = _SUCCESS;
- struct hal_data_8188e *haldata = &adapt->haldata;
-
- if (haldata->fw_ractrl) {
- __le32 lmask;
-
- memset(buf, 0, 3);
- lmask = cpu_to_le32(mask);
- memcpy(buf, &lmask, 3);
-
- FillH2CCmd_88E(adapt, H2C_DM_MACID_CFG, 3, buf);
- } else {
- res = _FAIL;
- }
-
- return res;
-}
-
-/* bitmap[0:27] = tx_rate_bitmap */
-/* bitmap[28:31]= Rate Adaptive id */
-/* arg[0:4] = macid */
-/* arg[5] = Short GI */
-void rtl8188e_Add_RateATid(struct adapter *pAdapter, u32 bitmap, u8 arg, u8 rssi_level)
-{
- struct hal_data_8188e *haldata = &pAdapter->haldata;
-
- u8 macid, raid, short_gi_rate = false;
-
- macid = arg & 0x1f;
-
- raid = (bitmap >> 28) & 0x0f;
- bitmap &= 0x0fffffff;
-
- if (rssi_level != DM_RATR_STA_INIT)
- bitmap = ODM_Get_Rate_Bitmap(&haldata->odmpriv, macid, bitmap, rssi_level);
-
- bitmap |= ((raid << 28) & 0xf0000000);
-
- short_gi_rate = (arg & BIT(5)) ? true : false;
-
- raid = (bitmap >> 28) & 0x0f;
-
- bitmap &= 0x0fffffff;
-
- ODM_RA_UpdateRateInfo_8188E(&haldata->odmpriv, macid, raid, bitmap, short_gi_rate);
-}
-
-void rtl8188e_set_FwPwrMode_cmd(struct adapter *adapt, u8 Mode)
-{
- struct setpwrmode_parm H2CSetPwrMode;
- struct pwrctrl_priv *pwrpriv = &adapt->pwrctrlpriv;
- u8 RLBM = 0; /* 0:Min, 1:Max, 2:User define */
-
- switch (Mode) {
- case PS_MODE_ACTIVE:
- H2CSetPwrMode.Mode = 0;
- break;
- case PS_MODE_MIN:
- H2CSetPwrMode.Mode = 1;
- break;
- case PS_MODE_MAX:
- RLBM = 1;
- H2CSetPwrMode.Mode = 1;
- break;
- case PS_MODE_DTIM:
- RLBM = 2;
- H2CSetPwrMode.Mode = 1;
- break;
- case PS_MODE_UAPSD_WMM:
- H2CSetPwrMode.Mode = 2;
- break;
- default:
- H2CSetPwrMode.Mode = 0;
- break;
- }
-
- H2CSetPwrMode.SmartPS_RLBM = (((pwrpriv->smart_ps << 4) & 0xf0) | (RLBM & 0x0f));
-
- H2CSetPwrMode.AwakeInterval = 1;
-
- H2CSetPwrMode.bAllQueueUAPSD = adapt->registrypriv.uapsd_enable;
-
- if (Mode > 0)
- H2CSetPwrMode.PwrState = 0x00;/* AllON(0x0C), RFON(0x04), RFOFF(0x00) */
- else
- H2CSetPwrMode.PwrState = 0x0C;/* AllON(0x0C), RFON(0x04), RFOFF(0x00) */
-
- FillH2CCmd_88E(adapt, H2C_PS_PWR_MODE, sizeof(H2CSetPwrMode), (u8 *)&H2CSetPwrMode);
-
-}
-
-void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, u16 mstatus_rpt)
-{
- __le16 mst_rpt = cpu_to_le16(mstatus_rpt);
-
- FillH2CCmd_88E(adapt, H2C_COM_MEDIA_STATUS_RPT, sizeof(mst_rpt), (u8 *)&mst_rpt);
-}
-
-static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
-{
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- u32 rate_len, pktlen;
- struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
-
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
-
- eth_broadcast_addr(pwlanhdr->addr1);
- memcpy(pwlanhdr->addr2, myid(&adapt->eeprompriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_my_bssid(cur_network), ETH_ALEN);
-
- SetSeqNum(pwlanhdr, 0/*pmlmeext->mgnt_seq*/);
- SetFrameSubType(pframe, WIFI_BEACON);
-
- pframe += sizeof(struct ieee80211_hdr_3addr);
- pktlen = sizeof(struct ieee80211_hdr_3addr);
-
- /* timestamp will be inserted by hardware */
- pframe += 8;
- pktlen += 8;
-
- /* beacon interval: 2 bytes */
- memcpy(pframe, (unsigned char *)(rtw_get_beacon_interval_from_ie(cur_network->IEs)), 2);
-
- pframe += 2;
- pktlen += 2;
-
- /* capability info: 2 bytes */
- memcpy(pframe, (unsigned char *)(rtw_get_capability_from_ie(cur_network->IEs)), 2);
-
- pframe += 2;
- pktlen += 2;
-
- if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) {
- pktlen += cur_network->IELength - sizeof(struct ndis_802_11_fixed_ie);
- memcpy(pframe, cur_network->IEs + sizeof(struct ndis_802_11_fixed_ie), pktlen);
-
- goto _ConstructBeacon;
- }
-
- /* below for ad-hoc mode */
-
- /* SSID */
- pframe = rtw_set_ie(pframe, _SSID_IE_, cur_network->Ssid.SsidLength, cur_network->Ssid.Ssid, &pktlen);
-
- /* supported rates... */
- rate_len = rtw_get_rateset_len(cur_network->SupportedRates);
- pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, ((rate_len > 8) ? 8 : rate_len), cur_network->SupportedRates, &pktlen);
-
- /* DS parameter set */
- pframe = rtw_set_ie(pframe, _DSSET_IE_, 1, (unsigned char *)&cur_network->Configuration.DSConfig, &pktlen);
-
- if ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) {
- u32 ATIMWindow;
- /* IBSS Parameter Set... */
- ATIMWindow = 0;
- pframe = rtw_set_ie(pframe, _IBSS_PARA_IE_, 2, (unsigned char *)(&ATIMWindow), &pktlen);
- }
-
- /* todo: ERP IE */
-
- /* EXTENDED SUPPORTED RATES */
- if (rate_len > 8)
- pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (rate_len - 8), (cur_network->SupportedRates + 8), &pktlen);
-
- /* todo:HT for adhoc */
-
-_ConstructBeacon:
-
- if ((pktlen + TXDESC_SIZE) > 512)
- return;
-
- *pLength = pktlen;
-}
-
-static void ConstructPSPoll(struct adapter *adapt, u8 *pframe, u32 *pLength)
-{
- struct ieee80211_hdr *pwlanhdr;
- struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- __le16 *fctrl;
-
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- /* Frame control. */
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
- SetPwrMgt(fctrl);
- SetFrameSubType(pframe, WIFI_PSPOLL);
-
- /* AID. */
- SetDuration(pframe, (pmlmeinfo->aid | 0xc000));
-
- /* BSSID. */
- memcpy(pwlanhdr->addr1, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
-
- /* TA. */
- memcpy(pwlanhdr->addr2, myid(&adapt->eeprompriv), ETH_ALEN);
-
- *pLength = 16;
-}
-
-static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
- u32 *pLength,
- u8 *StaAddr,
- u8 bQoS,
- u8 AC,
- u8 bEosp,
- u8 bForcePowerSave)
-{
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- u32 pktlen;
- struct mlme_priv *pmlmepriv = &adapt->mlmepriv;
- struct wlan_network *cur_network = &pmlmepriv->cur_network;
- struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
- if (bForcePowerSave)
- SetPwrMgt(fctrl);
-
- switch (cur_network->network.InfrastructureMode) {
- case Ndis802_11Infrastructure:
- SetToDs(fctrl);
- memcpy(pwlanhdr->addr1, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&adapt->eeprompriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, StaAddr, ETH_ALEN);
- break;
- case Ndis802_11APMode:
- SetFrDs(fctrl);
- memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
- memcpy(pwlanhdr->addr2, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
- memcpy(pwlanhdr->addr3, myid(&adapt->eeprompriv), ETH_ALEN);
- break;
- case Ndis802_11IBSS:
- default:
- memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&adapt->eeprompriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
- break;
- }
-
- SetSeqNum(pwlanhdr, 0);
-
- if (bQoS) {
- struct ieee80211_qos_hdr *pwlanqoshdr;
-
- SetFrameSubType(pframe, WIFI_QOS_DATA_NULL);
-
- pwlanqoshdr = (struct ieee80211_qos_hdr *)pframe;
- SetPriority(&pwlanqoshdr->qos_ctrl, AC);
- SetEOSP(&pwlanqoshdr->qos_ctrl, bEosp);
-
- pktlen = sizeof(struct ieee80211_qos_hdr);
- } else {
- SetFrameSubType(pframe, WIFI_DATA_NULL);
-
- pktlen = sizeof(struct ieee80211_qos_hdr);
- }
-
- *pLength = pktlen;
-}
-
-static void ConstructProbeRsp(struct adapter *adapt, u8 *pframe, u32 *pLength, u8 *StaAddr, bool bHideSSID)
-{
- struct ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- u8 *mac, *bssid;
- u32 pktlen;
- struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
-
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- mac = myid(&adapt->eeprompriv);
- bssid = cur_network->MacAddress;
-
- fctrl = &pwlanhdr->frame_control;
- *(fctrl) = 0;
- memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
- memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
- memcpy(pwlanhdr->addr3, bssid, ETH_ALEN);
-
- SetSeqNum(pwlanhdr, 0);
- SetFrameSubType(fctrl, WIFI_PROBERSP);
-
- pktlen = sizeof(struct ieee80211_hdr_3addr);
- pframe += pktlen;
-
- if (cur_network->IELength > MAX_IE_SZ)
- return;
-
- memcpy(pframe, cur_network->IEs, cur_network->IELength);
- pframe += cur_network->IELength;
- pktlen += cur_network->IELength;
-
- *pLength = pktlen;
-}
-
-/* Check whether the reserved page content is destroyed because the beacon is too large. */
-/* 2010.06.23. Added by tynli. */
-void CheckFwRsvdPageContent(struct adapter *Adapter)
-{
-}
-
-/* */
-/* Description: Fill the reserved packets that FW will use into the RSVD page. */
-/* Currently we send 4 types of packets to the rsvd page: */
-/* (1)Beacon, (2)Ps-poll, (3)Null data, (4)ProbeRsp. */
-/* Input: */
-/* bDLFinished - false: The first time, we send all the packets as one large packet to Hw, */
-/* so we need to set the packet length to the total length. */
-/* true: The second time, we send the first packet (default: beacon) */
-/* to Hw again and set the length in the descriptor to the real beacon length. */
-/* 2009.10.15 by tynli. */
-static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
-{
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- struct xmit_priv *pxmitpriv;
- struct mlme_ext_priv *pmlmeext;
- struct mlme_ext_info *pmlmeinfo;
- u32 BeaconLength = 0, ProbeRspLength = 0, PSPollLength;
- u32 NullDataLength, QosNullLength;
- u8 *ReservedPagePacket;
- u8 PageNum, PageNeed, TxDescLen;
- u16 BufIndex;
- u32 TotalPacketLen;
- struct rsvdpage_loc RsvdPageLoc;
-
- ReservedPagePacket = kzalloc(1000, GFP_KERNEL);
- if (!ReservedPagePacket)
- return;
-
- pxmitpriv = &adapt->xmitpriv;
- pmlmeext = &adapt->mlmeextpriv;
- pmlmeinfo = &pmlmeext->mlmext_info;
-
- TxDescLen = TXDESC_SIZE;
- PageNum = 0;
-
- /* 3 (1) beacon * 2 pages */
- BufIndex = TXDESC_OFFSET;
- ConstructBeacon(adapt, &ReservedPagePacket[BufIndex], &BeaconLength);
-
- /* When we count the first page size, we need to reserve the descriptor size for the RSVD */
- /* packet; it will be filled in front of the packet in TXPKTBUF. */
- PageNeed = (u8)PageNum_128(TxDescLen + BeaconLength);
- /* Reserve 2 pages for the beacon buffer. 2010.06.24. */
- if (PageNeed == 1)
- PageNeed += 1;
- PageNum += PageNeed;
-
- BufIndex += PageNeed * 128;
-
- /* 3 (2) ps-poll *1 page */
- RsvdPageLoc.LocPsPoll = PageNum;
- ConstructPSPoll(adapt, &ReservedPagePacket[BufIndex], &PSPollLength);
- rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex - TxDescLen], PSPollLength, true, false);
-
- PageNeed = (u8)PageNum_128(TxDescLen + PSPollLength);
- PageNum += PageNeed;
-
- BufIndex += PageNeed * 128;
-
- /* 3 (3) null data * 1 page */
- RsvdPageLoc.LocNullData = PageNum;
- ConstructNullFunctionData(adapt, &ReservedPagePacket[BufIndex], &NullDataLength, get_my_bssid(&pmlmeinfo->network), false, 0, 0, false);
- rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex - TxDescLen], NullDataLength, false, false);
-
- PageNeed = (u8)PageNum_128(TxDescLen + NullDataLength);
- PageNum += PageNeed;
-
- BufIndex += PageNeed * 128;
-
- /* 3 (4) probe response * 1page */
- RsvdPageLoc.LocProbeRsp = PageNum;
- ConstructProbeRsp(adapt, &ReservedPagePacket[BufIndex], &ProbeRspLength, get_my_bssid(&pmlmeinfo->network), false);
- rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex - TxDescLen], ProbeRspLength, false, false);
-
- PageNeed = (u8)PageNum_128(TxDescLen + ProbeRspLength);
- PageNum += PageNeed;
-
- BufIndex += PageNeed * 128;
-
- /* 3 (5) Qos null data */
- RsvdPageLoc.LocQosNull = PageNum;
- ConstructNullFunctionData(adapt, &ReservedPagePacket[BufIndex],
- &QosNullLength, get_my_bssid(&pmlmeinfo->network), true, 0, 0, false);
- rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex - TxDescLen], QosNullLength, false, false);
-
- PageNeed = (u8)PageNum_128(TxDescLen + QosNullLength);
- PageNum += PageNeed;
-
- TotalPacketLen = BufIndex + QosNullLength;
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- goto exit;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(adapt, pattrib);
- pattrib->qsel = 0x10;
- pattrib->last_txcmdsz = TotalPacketLen - TXDESC_OFFSET;
- pattrib->pktlen = pattrib->last_txcmdsz;
- memcpy(pmgntframe->buf_addr, ReservedPagePacket, TotalPacketLen);
-
- rtl8188eu_mgnt_xmit(adapt, pmgntframe);
-
- FillH2CCmd_88E(adapt, H2C_COM_RSVD_PAGE, sizeof(RsvdPageLoc), (u8 *)&RsvdPageLoc);
-
-exit:
- kfree(ReservedPagePacket);
-}
-
-void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
-{
- struct hal_data_8188e *haldata = &adapt->haldata;
- struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- bool bSendBeacon = false;
- bool bcn_valid = false;
- u8 DLBcnCount = 0;
- u32 poll = 0;
- u8 reg;
- int res;
-
- if (mstatus == 1) {
- /* We should set the AID, correct the TSF, and enable HW seq before setting JoinBssReport to Fw in 88/92C. */
- /* Suggested by filen. Added by tynli. */
- rtw_write16(adapt, REG_BCN_PSR_RPT, (0xC000 | pmlmeinfo->aid));
- /* Do not set TSF again here or vWiFi beacon DMA INT will not work. */
-
- /* Set REG_CR bit 8. DMA beacon by SW. */
- haldata->RegCR_1 |= BIT(0);
- rtw_write8(adapt, REG_CR + 1, haldata->RegCR_1);
-
- /* Disable Hw protection for a period which is reserved for Hw sending the beacon. */
- /* Fix the reserved page packet download failure caused by an access collision with the protection time. */
- /* 2010.05.11. Added by tynli. */
- res = rtw_read8(adapt, REG_BCN_CTRL, &reg);
- if (res)
- return;
-
- rtw_write8(adapt, REG_BCN_CTRL, reg & (~BIT(3)));
-
- res = rtw_read8(adapt, REG_BCN_CTRL, &reg);
- if (res)
- return;
-
- rtw_write8(adapt, REG_BCN_CTRL, reg | BIT(4));
-
- if (haldata->RegFwHwTxQCtrl & BIT(6))
- bSendBeacon = true;
-
- /* Set FWHW_TXQ_CTRL 0x422[6]=0 to tell Hw the packet is not a real beacon frame. */
- rtw_write8(adapt, REG_FWHW_TXQ_CTRL + 2, (haldata->RegFwHwTxQCtrl & (~BIT(6))));
- haldata->RegFwHwTxQCtrl &= (~BIT(6));
-
- clear_beacon_valid_bit(adapt);
- DLBcnCount = 0;
- poll = 0;
- do {
- /* download rsvd page. */
- SetFwRsvdPagePkt(adapt, false);
- DLBcnCount++;
- do {
- yield();
- /* mdelay(10); */
- /* check rsvd page download OK. */
- bcn_valid = get_beacon_valid_bit(adapt);
- poll++;
- } while (!bcn_valid && (poll % 10) != 0 && !adapt->bSurpriseRemoved && !adapt->bDriverStopped);
- } while (!bcn_valid && DLBcnCount <= 100 && !adapt->bSurpriseRemoved && !adapt->bDriverStopped);
-
- /* */
- /* We can only send the reserved page twice during the time that the Tx thread is stopped (e.g. pnpsetpower) */
- /* because we need to free the Tx BCN Desc which is used by the first reserved page packet. */
- /* At run time, we cannot get the Tx Desc until it is released in TxHandleInterrupt(), so we will return */
- /* the beacon TCB in the following code. 2011.11.23. by tynli. */
- /* */
-
- /* Enable Bcn */
- res = rtw_read8(adapt, REG_BCN_CTRL, &reg);
- if (res)
- return;
-
- rtw_write8(adapt, REG_BCN_CTRL, reg | BIT(3));
-
- res = rtw_read8(adapt, REG_BCN_CTRL, &reg);
- if (res)
- return;
-
- rtw_write8(adapt, REG_BCN_CTRL, reg & (~BIT(4)));
-
- /* Check whether there exists an adapter which would like to send a beacon. */
- /* If so, the original value of 0x422[6] will be 1; we should check this to */
- /* avoid setting 0x422[6] to 0 after downloading the reserved page, or HW will */
- /* not be able to send the beacon. */
- /* 2010.06.23. Added by tynli. */
- if (bSendBeacon) {
- rtw_write8(adapt, REG_FWHW_TXQ_CTRL + 2, (haldata->RegFwHwTxQCtrl | BIT(6)));
- haldata->RegFwHwTxQCtrl |= BIT(6);
- }
-
- /* Update RSVD page location H2C to Fw. */
- if (bcn_valid)
- clear_beacon_valid_bit(adapt);
-
- /* Do not enable HW DMA BCN or it will cause Pcie interface hang by timing issue. 2011.11.24. by tynli. */
- /* Clear CR[8] or beacon packet will not be send to TxBuf anymore. */
- haldata->RegCR_1 &= (~BIT(0));
- rtw_write8(adapt, REG_CR + 1, haldata->RegCR_1);
- }
-
-}
-
-void rtl8188e_set_p2p_ps_offload_cmd(struct adapter *adapt, u8 p2p_ps_state)
-{
- struct hal_data_8188e *haldata = &adapt->haldata;
- struct wifidirect_info *pwdinfo = &adapt->wdinfo;
- struct P2P_PS_Offload_t *p2p_ps_offload = &haldata->p2p_ps_offload;
- u8 i;
-
- switch (p2p_ps_state) {
- case P2P_PS_DISABLE:
- memset(p2p_ps_offload, 0, 1);
- break;
- case P2P_PS_ENABLE:
- /* update CTWindow value. */
- if (pwdinfo->ctwindow > 0) {
- p2p_ps_offload->CTWindow_En = 1;
- rtw_write8(adapt, REG_P2P_CTWIN, pwdinfo->ctwindow);
- }
-
- /* hw only support 2 set of NoA */
- for (i = 0; i < pwdinfo->noa_num; i++) {
- /* To control the register setting for which NOA */
- rtw_write8(adapt, REG_NOA_DESC_SEL, (i << 4));
- if (i == 0)
- p2p_ps_offload->NoA0_En = 1;
- else
- p2p_ps_offload->NoA1_En = 1;
-
- /* config P2P NoA Descriptor Register */
- rtw_write32(adapt, REG_NOA_DESC_DURATION, pwdinfo->noa_duration[i]);
- rtw_write32(adapt, REG_NOA_DESC_INTERVAL, pwdinfo->noa_interval[i]);
- rtw_write32(adapt, REG_NOA_DESC_START, pwdinfo->noa_start_time[i]);
- rtw_write8(adapt, REG_NOA_DESC_COUNT, pwdinfo->noa_count[i]);
- }
-
- if ((pwdinfo->opp_ps == 1) || (pwdinfo->noa_num > 0)) {
- /* rst p2p circuit */
- rtw_write8(adapt, REG_DUAL_TSF_RST, BIT(4));
-
- p2p_ps_offload->Offload_En = 1;
-
- if (pwdinfo->role == P2P_ROLE_GO) {
- p2p_ps_offload->role = 1;
- p2p_ps_offload->AllStaSleep = 0;
- } else {
- p2p_ps_offload->role = 0;
- }
-
- p2p_ps_offload->discovery = 0;
- }
- break;
- case P2P_PS_SCAN:
- p2p_ps_offload->discovery = 1;
- break;
- case P2P_PS_SCAN_DONE:
- p2p_ps_offload->discovery = 0;
- pwdinfo->p2p_ps_state = P2P_PS_ENABLE;
- break;
- default:
- break;
- }
-
- FillH2CCmd_88E(adapt, H2C_PS_P2P_OFFLOAD, 1, (u8 *)p2p_ps_offload);
-}
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_dm.c b/drivers/staging/r8188eu/hal/rtl8188e_dm.c
deleted file mode 100644
index 0399872c4546..000000000000
--- a/drivers/staging/r8188eu/hal/rtl8188e_dm.c
+++ /dev/null
@@ -1,146 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-/* This file is for 92CE/92CU dynamic mechanism only */
-#define _RTL8188E_DM_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/rtl8188e_hal.h"
-
-/* Initialize GPIO setting registers */
-static void dm_InitGPIOSetting(struct adapter *Adapter)
-{
- u8 tmp1byte;
- int res;
-
- res = rtw_read8(Adapter, REG_GPIO_MUXCFG, &tmp1byte);
- if (res)
- return;
-
- tmp1byte &= (GPIOSEL_GPIO | ~GPIOSEL_ENBT);
-
- rtw_write8(Adapter, REG_GPIO_MUXCFG, tmp1byte);
-}
-
-/* */
-/* functions */
-/* */
-static void Update_ODM_ComInfo_88E(struct adapter *Adapter)
-{
- struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
- struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
- struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
- struct hal_data_8188e *hal_data = &Adapter->haldata;
- struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
- struct dm_priv *pdmpriv = &hal_data->dmpriv;
- int i;
-
- pdmpriv->InitODMFlag = ODM_BB_RSSI_MONITOR;
- if (hal_data->AntDivCfg)
- pdmpriv->InitODMFlag |= ODM_BB_ANT_DIV;
-
- dm_odm->SupportAbility = pdmpriv->InitODMFlag;
-
- dm_odm->pWirelessMode = &pmlmeext->cur_wireless_mode;
- dm_odm->pSecChOffset = &hal_data->nCur40MhzPrimeSC;
- dm_odm->pBandWidth = &hal_data->CurrentChannelBW;
- dm_odm->pChannel = &hal_data->CurrentChannel;
- dm_odm->pbScanInProcess = &pmlmepriv->bScanInProcess;
- dm_odm->pbPowerSaving = &pwrctrlpriv->bpower_saving;
-
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_RF_ANTENNA_TYPE, hal_data->TRxAntDivType);
-
- for (i = 0; i < NUM_STA; i++)
- dm_odm->pODM_StaInfo[i] = NULL;
-}
-
-void rtl8188e_InitHalDm(struct adapter *Adapter)
-{
- struct hal_data_8188e *hal_data = &Adapter->haldata;
- struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
-
- dm_InitGPIOSetting(Adapter);
- Update_ODM_ComInfo_88E(Adapter);
- ODM_DMInit(dm_odm);
-}
-
-void rtl8188e_HalDmWatchDog(struct adapter *Adapter)
-{
- u8 hw_init_completed = Adapter->hw_init_completed;
- struct hal_data_8188e *hal_data = &Adapter->haldata;
- struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
- u8 bLinked = false;
-
- if (!hw_init_completed)
- return;
-
- if ((check_fwstate(pmlmepriv, WIFI_AP_STATE)) ||
- (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE))) {
- if (Adapter->stapriv.asoc_sta_count > 2)
- bLinked = true;
- } else {/* Station mode */
- if (check_fwstate(pmlmepriv, _FW_LINKED))
- bLinked = true;
- }
-
- hal_data->odmpriv.bLinked = bLinked;
- ODM_DMWatchdog(&hal_data->odmpriv);
-}
-
-void rtl8188e_init_dm_priv(struct adapter *Adapter)
-{
- struct hal_data_8188e *hal_data = &Adapter->haldata;
- struct dm_priv *pdmpriv = &hal_data->dmpriv;
- struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
-
- memset(pdmpriv, 0, sizeof(struct dm_priv));
- memset(dm_odm, 0, sizeof(*dm_odm));
-
- dm_odm->Adapter = Adapter;
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_MP_TEST_CHIP, IS_NORMAL_CHIP(hal_data->VersionID));
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_RF_ANTENNA_TYPE, hal_data->TRxAntDivType);
-}
-
-/* Add new function to reset the state of antenna diversity before link. */
-/* Compare RSSI for deciding antenna */
-void AntDivCompare8188E(struct adapter *Adapter, struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src)
-{
- struct hal_data_8188e *hal_data = &Adapter->haldata;
-
- if (0 != hal_data->AntDivCfg) {
- /* select optimum_antenna for before linked =>For antenna diversity */
- if (dst->Rssi >= src->Rssi) {/* keep org parameter */
- src->Rssi = dst->Rssi;
- src->PhyInfo.Optimum_antenna = dst->PhyInfo.Optimum_antenna;
- }
- }
-}
-
-/* Add new function to reset the state of antenna diversity before link. */
-u8 AntDivBeforeLink8188E(struct adapter *Adapter)
-{
- struct hal_data_8188e *hal_data = &Adapter->haldata;
- struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
- struct sw_ant_switch *dm_swat_tbl = &dm_odm->DM_SWAT_Table;
- struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
-
- /* Condition that does not need to use antenna diversity. */
- if (hal_data->AntDivCfg == 0)
- return false;
-
- if (check_fwstate(pmlmepriv, _FW_LINKED))
- return false;
-
- if (dm_swat_tbl->SWAS_NoLink_State == 0) {
- /* switch channel */
- dm_swat_tbl->SWAS_NoLink_State = 1;
- dm_swat_tbl->CurAntenna = (dm_swat_tbl->CurAntenna == Antenna_A) ? Antenna_B : Antenna_A;
-
- rtw_antenna_select_cmd(Adapter, dm_swat_tbl->CurAntenna, false);
- return true;
- } else {
- dm_swat_tbl->SWAS_NoLink_State = 0;
- return false;
- }
-}
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
deleted file mode 100644
index 73855bca76fe..000000000000
--- a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
+++ /dev/null
@@ -1,922 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _HAL_INIT_C_
-
-#include "../include/drv_types.h"
-#include "../include/rtw_efuse.h"
-#include "../include/rtl8188e_hal.h"
-#include "../include/rtw_iol.h"
-#include "../include/usb_ops.h"
-#include "../include/rtw_fw.h"
-
-static void iol_mode_enable(struct adapter *padapter, u8 enable)
-{
- u8 reg_0xf0 = 0;
- int res;
-
- if (enable) {
- /* Enable initial offload */
- res = rtw_read8(padapter, REG_SYS_CFG, &reg_0xf0);
- if (res)
- return;
-
- rtw_write8(padapter, REG_SYS_CFG, reg_0xf0 | SW_OFFLOAD_EN);
-
- if (!padapter->bFWReady)
- rtw_reset_8051(padapter);
-
- } else {
- /* disable initial offload */
- res = rtw_read8(padapter, REG_SYS_CFG, &reg_0xf0);
- if (res)
- return;
-
- rtw_write8(padapter, REG_SYS_CFG, reg_0xf0 & ~SW_OFFLOAD_EN);
- }
-}
-
-static s32 iol_execute(struct adapter *padapter, u8 control)
-{
- s32 status = _FAIL;
- u8 reg_0x88 = 0;
- unsigned long timeout;
- int res;
-
- control = control & 0x0f;
- res = rtw_read8(padapter, REG_HMEBOX_E0, &reg_0x88);
- if (res)
- return _FAIL;
-
- rtw_write8(padapter, REG_HMEBOX_E0, reg_0x88 | control);
-
- timeout = jiffies + msecs_to_jiffies(1000);
-
- do {
- res = rtw_read8(padapter, REG_HMEBOX_E0, &reg_0x88);
- if (res)
- continue;
-
- if (!(reg_0x88 & control))
- break;
-
- } while (time_before(jiffies, timeout));
-
- res = rtw_read8(padapter, REG_HMEBOX_E0, &reg_0x88);
- if (res)
- return _FAIL;
-
- status = (reg_0x88 & control) ? _FAIL : _SUCCESS;
- if (reg_0x88 & control << 4)
- status = _FAIL;
- return status;
-}
-
-static s32 iol_InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy)
-{
- s32 rst = _SUCCESS;
- iol_mode_enable(padapter, 1);
- rtw_write8(padapter, REG_TDECTRL + 1, txpktbuf_bndy);
- rst = iol_execute(padapter, CMD_INIT_LLT);
- iol_mode_enable(padapter, 0);
- return rst;
-}
-
-static void
-efuse_phymap_to_logical(u8 *phymap, u16 _size_byte, u8 *pbuf)
-{
- u8 *efuseTbl = NULL;
- u8 rtemp8;
- u16 eFuse_Addr = 0;
- u8 offset, wren;
- u16 i, j;
- u16 **eFuseWord = NULL;
- u8 u1temp = 0;
-
- efuseTbl = kzalloc(EFUSE_MAP_LEN_88E, GFP_KERNEL);
- if (!efuseTbl)
- goto exit;
-
- eFuseWord = rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16));
- if (!eFuseWord)
- goto exit;
-
-	/* 0. Refresh efuse init map as all 0xFF. */
- for (i = 0; i < EFUSE_MAX_SECTION_88E; i++)
- for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++)
- eFuseWord[i][j] = 0xFFFF;
-
- /* */
- /* 1. Read the first byte to check if efuse is empty!!! */
- /* */
- /* */
- rtemp8 = *(phymap + eFuse_Addr);
- if (rtemp8 != 0xFF) {
- eFuse_Addr++;
- } else {
- goto exit;
- }
-
- /* */
- /* 2. Read real efuse content. Filter PG header and every section data. */
- /* */
- while ((rtemp8 != 0xFF) && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E)) {
- /* Check PG header for section num. */
- if ((rtemp8 & 0x1F) == 0x0F) { /* extended header */
- u1temp = ((rtemp8 & 0xE0) >> 5);
- rtemp8 = *(phymap + eFuse_Addr);
- if ((rtemp8 & 0x0F) == 0x0F) {
- eFuse_Addr++;
- rtemp8 = *(phymap + eFuse_Addr);
-
- if (rtemp8 != 0xFF && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E))
- eFuse_Addr++;
- continue;
- } else {
- offset = ((rtemp8 & 0xF0) >> 1) | u1temp;
- wren = (rtemp8 & 0x0F);
- eFuse_Addr++;
- }
- } else {
- offset = ((rtemp8 >> 4) & 0x0f);
- wren = (rtemp8 & 0x0f);
- }
-
- if (offset < EFUSE_MAX_SECTION_88E) {
- /* Get word enable value from PG header */
- for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
- /* Check word enable condition in the section */
- if (!(wren & 0x01)) {
- rtemp8 = *(phymap + eFuse_Addr);
- eFuse_Addr++;
- eFuseWord[offset][i] = (rtemp8 & 0xff);
- if (eFuse_Addr >= EFUSE_REAL_CONTENT_LEN_88E)
- break;
- rtemp8 = *(phymap + eFuse_Addr);
- eFuse_Addr++;
- eFuseWord[offset][i] |= (((u16)rtemp8 << 8) & 0xff00);
-
- if (eFuse_Addr >= EFUSE_REAL_CONTENT_LEN_88E)
- break;
- }
- wren >>= 1;
- }
- }
- /* Read next PG header */
- rtemp8 = *(phymap + eFuse_Addr);
-
- if (rtemp8 != 0xFF && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E)) {
- eFuse_Addr++;
- }
- }
-
- /* */
- /* 3. Collect 16 sections and 4 word unit into Efuse map. */
- /* */
- for (i = 0; i < EFUSE_MAX_SECTION_88E; i++) {
- for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++) {
- efuseTbl[(i * 8) + (j * 2)] = (eFuseWord[i][j] & 0xff);
- efuseTbl[(i * 8) + ((j * 2) + 1)] = ((eFuseWord[i][j] >> 8) & 0xff);
- }
- }
-
- /* */
- /* 4. Copy from Efuse map to output pointer memory!!! */
- /* */
- memcpy(pbuf, efuseTbl, _size_byte);
-
-exit:
- kfree(efuseTbl);
- kfree(eFuseWord);
-}
-
-/* FIXME: add error handling in callers */
-static int efuse_read_phymap_from_txpktbuf(
- struct adapter *adapter,
- u8 *content, /* buffer to store efuse physical map */
- u16 *size /* for efuse content: the max byte to read. will update to byte read */
- )
-{
- unsigned long timeout;
- __le32 lo32 = 0, hi32 = 0;
- u16 len = 0, count = 0;
- int i = 0, res;
- u16 limit = *size;
- u8 reg;
- u8 *pos = content;
- u32 reg32;
-
- rtw_write8(adapter, REG_PKT_BUFF_ACCESS_CTRL, TXPKT_BUF_SELECT);
-
- while (1) {
- rtw_write16(adapter, REG_PKTBUF_DBG_ADDR, i);
-
- rtw_write8(adapter, REG_TXPKTBUF_DBG, 0);
- timeout = jiffies + msecs_to_jiffies(1000);
- do {
- res = rtw_read8(adapter, REG_TXPKTBUF_DBG, &reg);
- if (res)
- continue;
-
- if (reg)
- break;
-
- msleep(1);
- } while (time_before(jiffies, timeout));
-
- /* data from EEPROM needs to be in LE */
- res = rtw_read32(adapter, REG_PKTBUF_DBG_DATA_L, &reg32);
- if (res)
- return res;
-
- lo32 = cpu_to_le32(reg32);
-
- res = rtw_read32(adapter, REG_PKTBUF_DBG_DATA_H, &reg32);
- if (res)
- return res;
-
- hi32 = cpu_to_le32(reg32);
-
- if (i == 0) {
- u16 reg;
-
-			/* Although the value read here is not used, do not
-			 * remove the rtw_read16() call as it consumes
-			 * 2 bytes from the EEPROM source.
-			 */
- res = rtw_read16(adapter, REG_PKTBUF_DBG_DATA_L, &reg);
- if (res)
- return res;
-
- len = le32_to_cpu(lo32) & 0x0000ffff;
-
- limit = (len - 2 < limit) ? len - 2 : limit;
-
- memcpy(pos, ((u8 *)&lo32) + 2, (limit >= count + 2) ? 2 : limit - count);
- count += (limit >= count + 2) ? 2 : limit - count;
- pos = content + count;
- } else {
- memcpy(pos, ((u8 *)&lo32), (limit >= count + 4) ? 4 : limit - count);
- count += (limit >= count + 4) ? 4 : limit - count;
- pos = content + count;
- }
-
- if (limit > count && len - 2 > count) {
- memcpy(pos, (u8 *)&hi32, (limit >= count + 4) ? 4 : limit - count);
- count += (limit >= count + 4) ? 4 : limit - count;
- pos = content + count;
- }
-
- if (limit <= count || len - 2 <= count)
- break;
- i++;
- }
- rtw_write8(adapter, REG_PKT_BUFF_ACCESS_CTRL, DISABLE_TRXPKT_BUF_ACCESS);
- *size = count;
-
- return 0;
-}
-
-static s32 iol_read_efuse(struct adapter *padapter, u16 size_byte, u8 *logical_map)
-{
- s32 status = _FAIL;
- u8 physical_map[512];
- u16 size = 512;
-
- rtw_write8(padapter, REG_TDECTRL + 1, 0);
- memset(physical_map, 0xFF, 512);
- rtw_write8(padapter, REG_PKT_BUFF_ACCESS_CTRL, TXPKT_BUF_SELECT);
- status = iol_execute(padapter, CMD_READ_EFUSE_MAP);
- if (status == _SUCCESS)
- efuse_read_phymap_from_txpktbuf(padapter, physical_map, &size);
- efuse_phymap_to_logical(physical_map, size_byte, logical_map);
- return status;
-}
-
-s32 rtl8188e_iol_efuse_patch(struct adapter *padapter)
-{
- s32 result = _SUCCESS;
-
- if (rtw_IOL_applied(padapter)) {
- iol_mode_enable(padapter, 1);
- result = iol_execute(padapter, CMD_READ_EFUSE_MAP);
- if (result == _SUCCESS)
- result = iol_execute(padapter, CMD_EFUSE_PATCH);
-
- iol_mode_enable(padapter, 0);
- }
- return result;
-}
-
-static s32 iol_ioconfig(struct adapter *padapter, u8 iocfg_bndy)
-{
- s32 rst = _SUCCESS;
-
- rtw_write8(padapter, REG_TDECTRL + 1, iocfg_bndy);
- rst = iol_execute(padapter, CMD_IOCONFIG);
- return rst;
-}
-
-int rtl8188e_IOL_exec_cmds_sync(struct adapter *adapter, struct xmit_frame *xmit_frame, u32 max_wating_ms, u32 bndy_cnt)
-{
- struct pkt_attrib *pattrib = &xmit_frame->attrib;
- u8 i;
- int ret = _FAIL;
-
- if (rtw_IOL_append_END_cmd(xmit_frame) != _SUCCESS)
- goto exit;
- if (rtw_usb_bulk_size_boundary(adapter, TXDESC_SIZE + pattrib->last_txcmdsz)) {
- if (rtw_IOL_append_END_cmd(xmit_frame) != _SUCCESS)
- goto exit;
- }
-
- dump_mgntframe_and_wait(adapter, xmit_frame, max_wating_ms);
-
- iol_mode_enable(adapter, 1);
- for (i = 0; i < bndy_cnt; i++) {
- u8 page_no = 0;
- page_no = i * 2;
- ret = iol_ioconfig(adapter, page_no);
- if (ret != _SUCCESS)
- break;
- }
- iol_mode_enable(adapter, 0);
-exit:
- /* restore BCN_HEAD */
- rtw_write8(adapter, REG_TDECTRL + 1, 0);
- return ret;
-}
-
-void rtl8188e_EfusePowerSwitch(struct adapter *pAdapter, u8 PwrState)
-{
- u16 tmpV16;
- int res;
-
- if (PwrState) {
- rtw_write8(pAdapter, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON);
-
- /* 1.2V Power: From VDDON with Power Cut(0x0000h[15]), default valid */
- res = rtw_read16(pAdapter, REG_SYS_ISO_CTRL, &tmpV16);
- if (res)
- return;
-
- if (!(tmpV16 & PWC_EV12V)) {
- tmpV16 |= PWC_EV12V;
- rtw_write16(pAdapter, REG_SYS_ISO_CTRL, tmpV16);
- }
- /* Reset: 0x0000h[28], default valid */
- res = rtw_read16(pAdapter, REG_SYS_FUNC_EN, &tmpV16);
- if (res)
- return;
-
- if (!(tmpV16 & FEN_ELDR)) {
- tmpV16 |= FEN_ELDR;
- rtw_write16(pAdapter, REG_SYS_FUNC_EN, tmpV16);
- }
-
- /* Clock: Gated(0x0008h[5]) 8M(0x0008h[1]) clock from ANA, default valid */
- res = rtw_read16(pAdapter, REG_SYS_CLKR, &tmpV16);
- if (res)
- return;
-
- if ((!(tmpV16 & LOADER_CLK_EN)) || (!(tmpV16 & ANA8M))) {
- tmpV16 |= (LOADER_CLK_EN | ANA8M);
- rtw_write16(pAdapter, REG_SYS_CLKR, tmpV16);
- }
- } else {
- rtw_write8(pAdapter, REG_EFUSE_ACCESS, EFUSE_ACCESS_OFF);
- }
-}
-
-static void Hal_EfuseReadEFuse88E(struct adapter *Adapter,
- u16 _offset,
- u16 _size_byte,
- u8 *pbuf)
-{
- u8 *efuseTbl = NULL;
- u8 rtemp8[1];
- u16 eFuse_Addr = 0;
- u8 offset, wren;
- u16 i, j;
- u16 **eFuseWord = NULL;
- u16 efuse_utilized = 0;
- u8 u1temp = 0;
-
- /* */
-	/* Do NOT exceed the total size of the EFuse table. Added by Roger, 2008.11.10. */
- /* */
-	if ((_offset + _size_byte) > EFUSE_MAP_LEN_88E) /* total E-Fuse table is 512 bytes */
- goto exit;
-
- efuseTbl = kzalloc(EFUSE_MAP_LEN_88E, GFP_KERNEL);
- if (!efuseTbl)
- goto exit;
-
- eFuseWord = rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16));
- if (!eFuseWord)
- goto exit;
-
-	/* 0. Refresh efuse init map as all 0xFF. */
- for (i = 0; i < EFUSE_MAX_SECTION_88E; i++)
- for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++)
- eFuseWord[i][j] = 0xFFFF;
-
- /* */
- /* 1. Read the first byte to check if efuse is empty!!! */
- /* */
- /* */
- ReadEFuseByte(Adapter, eFuse_Addr, rtemp8);
- if (*rtemp8 != 0xFF) {
- efuse_utilized++;
- eFuse_Addr++;
- } else {
- goto exit;
- }
-
- /* */
- /* 2. Read real efuse content. Filter PG header and every section data. */
- /* */
- while ((*rtemp8 != 0xFF) && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E)) {
- /* Check PG header for section num. */
- if ((*rtemp8 & 0x1F) == 0x0F) { /* extended header */
- u1temp = ((*rtemp8 & 0xE0) >> 5);
-
- ReadEFuseByte(Adapter, eFuse_Addr, rtemp8);
-
- if ((*rtemp8 & 0x0F) == 0x0F) {
- eFuse_Addr++;
- ReadEFuseByte(Adapter, eFuse_Addr, rtemp8);
-
- if (*rtemp8 != 0xFF && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E))
- eFuse_Addr++;
- continue;
- } else {
- offset = ((*rtemp8 & 0xF0) >> 1) | u1temp;
- wren = (*rtemp8 & 0x0F);
- eFuse_Addr++;
- }
- } else {
- offset = ((*rtemp8 >> 4) & 0x0f);
- wren = (*rtemp8 & 0x0f);
- }
-
- if (offset < EFUSE_MAX_SECTION_88E) {
- /* Get word enable value from PG header */
-
- for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
- /* Check word enable condition in the section */
- if (!(wren & 0x01)) {
- ReadEFuseByte(Adapter, eFuse_Addr, rtemp8);
- eFuse_Addr++;
- efuse_utilized++;
- eFuseWord[offset][i] = (*rtemp8 & 0xff);
- if (eFuse_Addr >= EFUSE_REAL_CONTENT_LEN_88E)
- break;
- ReadEFuseByte(Adapter, eFuse_Addr, rtemp8);
- eFuse_Addr++;
- efuse_utilized++;
- eFuseWord[offset][i] |= (((u16)*rtemp8 << 8) & 0xff00);
- if (eFuse_Addr >= EFUSE_REAL_CONTENT_LEN_88E)
- break;
- }
- wren >>= 1;
- }
- }
-
- /* Read next PG header */
- ReadEFuseByte(Adapter, eFuse_Addr, rtemp8);
-
- if (*rtemp8 != 0xFF && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E)) {
- efuse_utilized++;
- eFuse_Addr++;
- }
- }
-
- /* 3. Collect 16 sections and 4 word unit into Efuse map. */
- for (i = 0; i < EFUSE_MAX_SECTION_88E; i++) {
- for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++) {
- efuseTbl[(i * 8) + (j * 2)] = (eFuseWord[i][j] & 0xff);
- efuseTbl[(i * 8) + ((j * 2) + 1)] = ((eFuseWord[i][j] >> 8) & 0xff);
- }
- }
-
- /* 4. Copy from Efuse map to output pointer memory!!! */
- for (i = 0; i < _size_byte; i++)
- pbuf[i] = efuseTbl[_offset + i];
-
-exit:
- kfree(efuseTbl);
- kfree(eFuseWord);
-}
-
-void rtl8188e_ReadEFuse(struct adapter *Adapter, u16 _size_byte, u8 *pbuf)
-{
- int ret = _FAIL;
- if (rtw_IOL_applied(Adapter)) {
- rtl8188eu_InitPowerOn(Adapter);
-
- iol_mode_enable(Adapter, 1);
- ret = iol_read_efuse(Adapter, _size_byte, pbuf);
- iol_mode_enable(Adapter, 0);
-
- if (_SUCCESS == ret)
- return;
- }
-
- Hal_EfuseReadEFuse88E(Adapter, 0, _size_byte, pbuf);
-}
-
-static void dump_chip_info(struct adapter *adapter, struct HAL_VERSION chip_vers)
-{
- struct net_device *netdev = adapter->pnetdev;
- char *cut = NULL;
- char buf[25];
-
- switch (chip_vers.CUTVersion) {
- case A_CUT_VERSION:
- cut = "A_CUT";
- break;
- case B_CUT_VERSION:
- cut = "B_CUT";
- break;
- case C_CUT_VERSION:
- cut = "C_CUT";
- break;
- case D_CUT_VERSION:
- cut = "D_CUT";
- break;
- case E_CUT_VERSION:
- cut = "E_CUT";
- break;
- default:
- snprintf(buf, sizeof(buf), "UNKNOWN_CUT(%d)", chip_vers.CUTVersion);
- cut = buf;
- break;
- }
-
- netdev_dbg(netdev, "Chip Version Info: CHIP_8188E_%s_%s_%s_1T1R_RomVer(%d)\n",
- IS_NORMAL_CHIP(chip_vers) ? "Normal_Chip" : "Test_Chip",
- IS_CHIP_VENDOR_TSMC(chip_vers) ? "TSMC" : "UMC",
- cut, 0);
-}
-
-void rtl8188e_read_chip_version(struct adapter *padapter)
-{
- u32 value32;
- struct HAL_VERSION ChipVersion;
- struct hal_data_8188e *pHalData = &padapter->haldata;
- int res;
-
- res = rtw_read32(padapter, REG_SYS_CFG, &value32);
- if (res)
- return;
-
- ChipVersion.ChipType = ((value32 & RTL_ID) ? TEST_CHIP : NORMAL_CHIP);
-
- ChipVersion.VendorType = ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : CHIP_VENDOR_TSMC);
- ChipVersion.CUTVersion = (value32 & CHIP_VER_RTL_MASK) >> CHIP_VER_RTL_SHIFT; /* IC version (CUT) */
-
- dump_chip_info(padapter, ChipVersion);
-
- pHalData->VersionID = ChipVersion;
-}
-
-void rtl8188e_SetHalODMVar(struct adapter *Adapter, void *pValue1, bool bSet)
-{
- struct hal_data_8188e *pHalData = &Adapter->haldata;
- struct odm_dm_struct *podmpriv = &pHalData->odmpriv;
- struct sta_info *psta = (struct sta_info *)pValue1;
-
- if (bSet) {
- podmpriv->pODM_StaInfo[psta->mac_id] = psta;
- ODM_RAInfo_Init(podmpriv, psta->mac_id);
- } else {
- podmpriv->pODM_StaInfo[psta->mac_id] = NULL;
- }
-}
-
-void hal_notch_filter_8188e(struct adapter *adapter, bool enable)
-{
- int res;
- u8 reg;
-
- res = rtw_read8(adapter, rOFDM0_RxDSP + 1, &reg);
- if (res)
- return;
-
- if (enable)
- rtw_write8(adapter, rOFDM0_RxDSP + 1, reg | BIT(1));
- else
- rtw_write8(adapter, rOFDM0_RxDSP + 1, reg & ~BIT(1));
-}
-
-/* */
-/* */
-/* LLT R/W/Init function */
-/* */
-/* */
-static s32 _LLTWrite(struct adapter *padapter, u32 address, u32 data)
-{
- s32 count;
- u32 value = _LLT_INIT_ADDR(address) | _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);
- u16 LLTReg = REG_LLT_INIT;
- int res;
-
- rtw_write32(padapter, LLTReg, value);
-
- /* polling */
- for (count = 0; count <= POLLING_LLT_THRESHOLD; count++) {
- res = rtw_read32(padapter, LLTReg, &value);
- if (res)
- continue;
-
- if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
- break;
- }
-
- return count > POLLING_LLT_THRESHOLD ? _FAIL : _SUCCESS;
-}
-
-s32 InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy)
-{
- s32 status = _FAIL;
- u32 i;
- u32 Last_Entry_Of_TxPktBuf = LAST_ENTRY_OF_TX_PKT_BUFFER;/* 176, 22k */
-
- if (rtw_IOL_applied(padapter)) {
- status = iol_InitLLTTable(padapter, txpktbuf_bndy);
- } else {
- for (i = 0; i < (txpktbuf_bndy - 1); i++) {
- status = _LLTWrite(padapter, i, i + 1);
- if (_SUCCESS != status)
- return status;
- }
-
- /* end of list */
- status = _LLTWrite(padapter, (txpktbuf_bndy - 1), 0xFF);
- if (_SUCCESS != status)
- return status;
-
- /* Make the other pages as ring buffer */
- /* This ring buffer is used as beacon buffer if we config this MAC as two MAC transfer. */
- /* Otherwise used as local loopback buffer. */
- for (i = txpktbuf_bndy; i < Last_Entry_Of_TxPktBuf; i++) {
- status = _LLTWrite(padapter, i, (i + 1));
- if (_SUCCESS != status)
- return status;
- }
-
- /* Let last entry point to the start entry of ring buffer */
- status = _LLTWrite(padapter, Last_Entry_Of_TxPktBuf, txpktbuf_bndy);
- if (_SUCCESS != status) {
- return status;
- }
- }
-
- return status;
-}
-
-void Hal_EfuseParseIDCode88E(struct adapter *padapter, u8 *hwinfo)
-{
- struct eeprom_priv *pEEPROM = &padapter->eeprompriv;
- struct net_device *netdev = padapter->pnetdev;
- u16 EEPROMId;
-
-	/* Check 0x8129 again to make sure of the autoload status!! */
- EEPROMId = le16_to_cpu(*((__le16 *)hwinfo));
- if (EEPROMId != RTL_EEPROM_ID) {
- pr_err("EEPROM ID(%#x) is invalid!!\n", EEPROMId);
- pEEPROM->bautoload_fail_flag = true;
- } else {
- pEEPROM->bautoload_fail_flag = false;
- }
-
- netdev_dbg(netdev, "EEPROM ID = 0x%04x\n", EEPROMId);
-}
-
-static void Hal_ReadPowerValueFromPROM_8188E(struct txpowerinfo24g *pwrInfo24G, u8 *PROMContent, bool AutoLoadFail)
-{
- u32 rfPath, eeAddr = EEPROM_TX_PWR_INX_88E, group, TxCount = 0;
-
- memset(pwrInfo24G, 0, sizeof(struct txpowerinfo24g));
-
- if (AutoLoadFail) {
- for (rfPath = 0; rfPath < RF_PATH_MAX; rfPath++) {
- /* 2.4G default value */
- for (group = 0; group < MAX_CHNL_GROUP_24G; group++) {
- pwrInfo24G->IndexCCK_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX;
- pwrInfo24G->IndexBW40_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX;
- }
- for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
- if (TxCount == 0) {
- pwrInfo24G->BW20_Diff[rfPath][0] = EEPROM_DEFAULT_24G_HT20_DIFF;
- pwrInfo24G->OFDM_Diff[rfPath][0] = EEPROM_DEFAULT_24G_OFDM_DIFF;
- } else {
- pwrInfo24G->BW20_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
- pwrInfo24G->BW40_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
- pwrInfo24G->CCK_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
- pwrInfo24G->OFDM_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
- }
- }
- }
- return;
- }
-
- for (rfPath = 0; rfPath < RF_PATH_MAX; rfPath++) {
- /* 2.4G default value */
- for (group = 0; group < MAX_CHNL_GROUP_24G; group++) {
- pwrInfo24G->IndexCCK_Base[rfPath][group] = PROMContent[eeAddr++];
- if (pwrInfo24G->IndexCCK_Base[rfPath][group] == 0xFF)
- pwrInfo24G->IndexCCK_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX;
- }
- for (group = 0; group < MAX_CHNL_GROUP_24G - 1; group++) {
- pwrInfo24G->IndexBW40_Base[rfPath][group] = PROMContent[eeAddr++];
- if (pwrInfo24G->IndexBW40_Base[rfPath][group] == 0xFF)
- pwrInfo24G->IndexBW40_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX;
- }
- for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
- if (TxCount == 0) {
- pwrInfo24G->BW40_Diff[rfPath][TxCount] = 0;
- if (PROMContent[eeAddr] == 0xFF) {
- pwrInfo24G->BW20_Diff[rfPath][TxCount] = EEPROM_DEFAULT_24G_HT20_DIFF;
- } else {
- pwrInfo24G->BW20_Diff[rfPath][TxCount] = (PROMContent[eeAddr] & 0xf0) >> 4;
- if (pwrInfo24G->BW20_Diff[rfPath][TxCount] & BIT(3)) /* 4bit sign number to 8 bit sign number */
- pwrInfo24G->BW20_Diff[rfPath][TxCount] |= 0xF0;
- }
-
- if (PROMContent[eeAddr] == 0xFF) {
- pwrInfo24G->OFDM_Diff[rfPath][TxCount] = EEPROM_DEFAULT_24G_OFDM_DIFF;
- } else {
- pwrInfo24G->OFDM_Diff[rfPath][TxCount] = (PROMContent[eeAddr] & 0x0f);
- if (pwrInfo24G->OFDM_Diff[rfPath][TxCount] & BIT(3)) /* 4bit sign number to 8 bit sign number */
- pwrInfo24G->OFDM_Diff[rfPath][TxCount] |= 0xF0;
- }
- pwrInfo24G->CCK_Diff[rfPath][TxCount] = 0;
- eeAddr++;
- } else {
- if (PROMContent[eeAddr] == 0xFF) {
- pwrInfo24G->BW40_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
- } else {
- pwrInfo24G->BW40_Diff[rfPath][TxCount] = (PROMContent[eeAddr] & 0xf0) >> 4;
- if (pwrInfo24G->BW40_Diff[rfPath][TxCount] & BIT(3)) /* 4bit sign number to 8 bit sign number */
- pwrInfo24G->BW40_Diff[rfPath][TxCount] |= 0xF0;
- }
-
- if (PROMContent[eeAddr] == 0xFF) {
- pwrInfo24G->BW20_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
- } else {
- pwrInfo24G->BW20_Diff[rfPath][TxCount] = (PROMContent[eeAddr] & 0x0f);
- if (pwrInfo24G->BW20_Diff[rfPath][TxCount] & BIT(3)) /* 4bit sign number to 8 bit sign number */
- pwrInfo24G->BW20_Diff[rfPath][TxCount] |= 0xF0;
- }
- eeAddr++;
-
- if (PROMContent[eeAddr] == 0xFF) {
- pwrInfo24G->OFDM_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
- } else {
- pwrInfo24G->OFDM_Diff[rfPath][TxCount] = (PROMContent[eeAddr] & 0xf0) >> 4;
- if (pwrInfo24G->OFDM_Diff[rfPath][TxCount] & BIT(3)) /* 4bit sign number to 8 bit sign number */
- pwrInfo24G->OFDM_Diff[rfPath][TxCount] |= 0xF0;
- }
-
- if (PROMContent[eeAddr] == 0xFF) {
- pwrInfo24G->CCK_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
- } else {
- pwrInfo24G->CCK_Diff[rfPath][TxCount] = (PROMContent[eeAddr] & 0x0f);
- if (pwrInfo24G->CCK_Diff[rfPath][TxCount] & BIT(3)) /* 4bit sign number to 8 bit sign number */
- pwrInfo24G->CCK_Diff[rfPath][TxCount] |= 0xF0;
- }
- eeAddr++;
- }
- }
- }
-}
-
-static void hal_get_chnl_group_88e(u8 chnl, u8 *group)
-{
- if (chnl < 3) /* Channel 1-2 */
- *group = 0;
- else if (chnl < 6) /* Channel 3-5 */
- *group = 1;
- else if (chnl < 9) /* Channel 6-8 */
- *group = 2;
- else if (chnl < 12) /* Channel 9-11 */
- *group = 3;
- else if (chnl < 14) /* Channel 12-13 */
- *group = 4;
- else if (chnl == 14) /* Channel 14 */
- *group = 5;
-}
-
-void Hal_ReadPowerSavingMode88E(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
-{
- if (AutoLoadFail)
- padapter->pwrctrlpriv.bSupportRemoteWakeup = false;
- else
-		/* hw power down mode selection, 0:rf-off / 1:power down */
-
-		/* decide whether hw supports the remote wakeup function */
-		/* if hw supports it, the 8051 (SIE) will generate a WakeUp signal (D+/D- toggle) on autoresume */
- padapter->pwrctrlpriv.bSupportRemoteWakeup = (hwinfo[EEPROM_USB_OPTIONAL_FUNCTION0] & BIT(1)) ? true : false;
-}
-
-void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool AutoLoadFail)
-{
- struct hal_data_8188e *pHalData = &padapter->haldata;
- struct txpowerinfo24g pwrInfo24G;
- u8 ch, group;
- u8 TxCount;
-
- Hal_ReadPowerValueFromPROM_8188E(&pwrInfo24G, PROMContent, AutoLoadFail);
-
- for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
- hal_get_chnl_group_88e(ch, &group);
-
- pHalData->Index24G_CCK_Base[ch] = pwrInfo24G.IndexCCK_Base[0][group];
- if (ch == 14)
- pHalData->Index24G_BW40_Base[ch] = pwrInfo24G.IndexBW40_Base[0][4];
- else
- pHalData->Index24G_BW40_Base[ch] = pwrInfo24G.IndexBW40_Base[0][group];
- }
- for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
- pHalData->OFDM_24G_Diff[TxCount] = pwrInfo24G.OFDM_Diff[0][TxCount];
- pHalData->BW20_24G_Diff[TxCount] = pwrInfo24G.BW20_Diff[0][TxCount];
- }
-
- /* 2010/10/19 MH Add Regulator recognize for CU. */
- if (!AutoLoadFail) {
- pHalData->EEPROMRegulatory = (PROMContent[EEPROM_RF_BOARD_OPTION_88E] & 0x7); /* bit0~2 */
- if (PROMContent[EEPROM_RF_BOARD_OPTION_88E] == 0xFF)
- pHalData->EEPROMRegulatory = (EEPROM_DEFAULT_BOARD_OPTION & 0x7); /* bit0~2 */
- } else {
- pHalData->EEPROMRegulatory = 0;
- }
-}
-
-void Hal_EfuseParseXtal_8188E(struct adapter *pAdapter, u8 *hwinfo, bool AutoLoadFail)
-{
- struct hal_data_8188e *pHalData = &pAdapter->haldata;
-
- if (!AutoLoadFail) {
- pHalData->CrystalCap = hwinfo[EEPROM_XTAL_88E];
- if (pHalData->CrystalCap == 0xFF)
- pHalData->CrystalCap = EEPROM_Default_CrystalCap_88E;
- } else {
- pHalData->CrystalCap = EEPROM_Default_CrystalCap_88E;
- }
-}
-
-void rtl8188e_EfuseParseChnlPlan(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
-{
- padapter->mlmepriv.ChannelPlan =
- hal_com_get_channel_plan(padapter,
- hwinfo ? hwinfo[EEPROM_ChannelPlan_88E] : 0xFF,
- padapter->registrypriv.channel_plan,
- RT_CHANNEL_DOMAIN_WORLD_WIDE_13, AutoLoadFail);
-}
-
-void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent, bool AutoLoadFail)
-{
- struct hal_data_8188e *pHalData = &pAdapter->haldata;
- struct registry_priv *registry_par = &pAdapter->registrypriv;
-
- if (!AutoLoadFail) {
- /* Antenna Diversity setting. */
- if (registry_par->antdiv_cfg == 2) { /* 2:By EFUSE */
- pHalData->AntDivCfg = (PROMContent[EEPROM_RF_BOARD_OPTION_88E] & 0x18) >> 3;
- if (PROMContent[EEPROM_RF_BOARD_OPTION_88E] == 0xFF)
- pHalData->AntDivCfg = (EEPROM_DEFAULT_BOARD_OPTION & 0x18) >> 3;
- } else {
- pHalData->AntDivCfg = registry_par->antdiv_cfg; /* 0:OFF , 1:ON, 2:By EFUSE */
- }
-
- if (registry_par->antdiv_type == 0) {
- /* If TRxAntDivType is AUTO in advanced setting, use EFUSE value instead. */
- pHalData->TRxAntDivType = PROMContent[EEPROM_RF_ANTENNA_OPT_88E];
- if (pHalData->TRxAntDivType == 0xFF)
- pHalData->TRxAntDivType = CG_TRX_HW_ANTDIV; /* For 88EE, 1Tx and 1RxCG are fixed.(1Ant, Tx and RxCG are both on aux port) */
- } else {
- pHalData->TRxAntDivType = registry_par->antdiv_type;
- }
-
- if (pHalData->TRxAntDivType == CG_TRX_HW_ANTDIV || pHalData->TRxAntDivType == CGCS_RX_HW_ANTDIV)
- pHalData->AntDivCfg = 1; /* 0xC1[3] is ignored. */
- } else {
- pHalData->AntDivCfg = 0;
- }
-}
-
-void Hal_ReadThermalMeter_88E(struct adapter *Adapter, u8 *PROMContent, bool AutoloadFail)
-{
- struct hal_data_8188e *pHalData = &Adapter->haldata;
-
- /* ThermalMeter from EEPROM */
- if (!AutoloadFail)
- pHalData->EEPROMThermalMeter = PROMContent[EEPROM_THERMAL_METER_88E];
- else
- pHalData->EEPROMThermalMeter = EEPROM_Default_ThermalMeter_88E;
-
- if (pHalData->EEPROMThermalMeter == 0xff || AutoloadFail)
- pHalData->EEPROMThermalMeter = EEPROM_Default_ThermalMeter_88E;
-}
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
deleted file mode 100644
index f4edf4a8f5c2..000000000000
--- a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
+++ /dev/null
@@ -1,705 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _RTL8188E_PHYCFG_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/rtw_iol.h"
-#include "../include/rtl8188e_hal.h"
-
-/* */
-/* 1. BB register R/W API */
-/* */
-
-/* Get shifted position of the bit mask */
-static u32 phy_calculate_bit_shift(u32 bitmask)
-{
- u32 i = ffs(bitmask);
-
- return i ? i - 1 : 32;
-}
-
-/**
-* Function: PHY_QueryBBReg
-*
-* Overview: Read "specific bits" from BB register
-*
-* Input:
-* struct adapter *Adapter,
-* u32 RegAddr, The target address to be readback
-* u32 BitMask The target bit position in the target address
-* to be readback
-* Output: None
-* Return: u32 Data The readback register value
-* Note: This function is equal to "GetRegSetting" in PHY programming guide
-*/
-u32
-rtl8188e_PHY_QueryBBReg(
- struct adapter *Adapter,
- u32 RegAddr,
- u32 BitMask
- )
-{
- u32 ReturnValue = 0, OriginalValue, BitShift;
- int res;
-
- res = rtw_read32(Adapter, RegAddr, &OriginalValue);
- if (res)
- return 0;
-
- BitShift = phy_calculate_bit_shift(BitMask);
- ReturnValue = (OriginalValue & BitMask) >> BitShift;
- return ReturnValue;
-}
-
-/**
-* Function: PHY_SetBBReg
-*
-* Overview: Write "Specific bits" to BB register (page 8~)
-*
-* Input:
-* struct adapter *Adapter,
-* u32 RegAddr, The target address to be modified
-* u32 BitMask The target bit position in the target address
-* to be modified
-* u32 Data The new register value in the target bit position
-* of the target address
-*
-* Output: None
-* Return: None
-* Note: This function is equal to "PutRegSetting" in PHY programming guide
-*/
-
-void rtl8188e_PHY_SetBBReg(struct adapter *Adapter, u32 RegAddr, u32 BitMask, u32 Data)
-{
- u32 OriginalValue, BitShift;
- int res;
-
- if (BitMask != bMaskDWord) { /* if not "double word" write */
- res = rtw_read32(Adapter, RegAddr, &OriginalValue);
- if (res)
- return;
-
- BitShift = phy_calculate_bit_shift(BitMask);
- Data = ((OriginalValue & (~BitMask)) | (Data << BitShift));
- }
-
- rtw_write32(Adapter, RegAddr, Data);
-}
-
-/* */
-/* 2. RF register R/W API */
-/* */
-/**
-* Function: phy_RFSerialRead
-*
-* Overview: Read register from RF chips
-*
-* Input:
-* struct adapter *Adapter,
-* u32 Offset, The target address to be read
-*
-* Output: None
-* Return: u32 readback value
-* Note: There are three types of serial operations:
-* 1. Software serial write
-* 2. Hardware LSSI-Low Speed Serial Interface
-* 3. Hardware HSSI-High speed
-* serial write. The driver needs to implement (1) and (2).
-* This function is equal to the combination of RF_ReadReg() and RFLSSIRead()
-*/
-static u32
-phy_RFSerialRead(
- struct adapter *Adapter,
- u32 Offset
- )
-{
- u32 retValue = 0;
- struct hal_data_8188e *pHalData = &Adapter->haldata;
- struct bb_reg_def *pPhyReg = &pHalData->PHYRegDef;
- u32 NewOffset;
- u32 tmplong, tmplong2;
- u8 RfPiEnable = 0;
- /* */
- /* Make sure RF register offset is correct */
- /* */
- Offset &= 0xff;
-
- /* */
- /* Switch page for 8256 RF IC */
- /* */
- NewOffset = Offset;
-
- /* For 92S LSSI Read RFLSSIRead */
- /* For RF A/B write 0x824/82c(does not work in the future) */
- /* We must use 0x824 for RF A and B to execute read trigger */
- tmplong = rtl8188e_PHY_QueryBBReg(Adapter, rFPGA0_XA_HSSIParameter2, bMaskDWord);
- tmplong2 = tmplong;
-
- tmplong2 = (tmplong2 & (~bLSSIReadAddress)) | (NewOffset << 23) | bLSSIReadEdge; /* T65 RF */
-
- rtl8188e_PHY_SetBBReg(Adapter, rFPGA0_XA_HSSIParameter2, bMaskDWord, tmplong & (~bLSSIReadEdge));
- udelay(10);/* PlatformStallExecution(10); */
-
- rtl8188e_PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, bMaskDWord, tmplong2);
- udelay(100);/* PlatformStallExecution(100); */
-
- udelay(10);/* PlatformStallExecution(10); */
-
- RfPiEnable = (u8)rtl8188e_PHY_QueryBBReg(Adapter, rFPGA0_XA_HSSIParameter1, BIT(8));
-
- if (RfPiEnable) { /* Read from BBreg8b8, 12 bits for 8190, 20bits for T65 RF */
- retValue = rtl8188e_PHY_QueryBBReg(Adapter, pPhyReg->rfLSSIReadBackPi, bLSSIReadBackData);
- } else { /* Read from BBreg8a0, 12 bits for 8190, 20 bits for T65 RF */
- retValue = rtl8188e_PHY_QueryBBReg(Adapter, pPhyReg->rfLSSIReadBack, bLSSIReadBackData);
- }
- return retValue;
-}
-
-/**
-* Function: phy_RFSerialWrite
-*
-* Overview: Write data to RF register (page 8~)
-*
-* Input:
-* struct adapter *Adapter,
-* enum rf_radio_path eRFPath, Radio path of A/B/C/D
-* u32 Offset, The target address to be read
-* u32 Data The new register Data in the target bit position
-* of the target to be read
-*
-* Output: None
-* Return: None
-* Note: There are three types of serial operations:
-* 1. Software serial write
-* 2. Hardware LSSI-Low Speed Serial Interface
-* 3. Hardware HSSI-High speed
-* serial write. The driver needs to implement (1) and (2).
-* This function is equal to the combination of RF_ReadReg() and RFLSSIRead()
- *
- * Note: For RF8256 only
- * The total count of RTL8256 (Zebra4) registers is around 36, but it only employs a
- * 4-bit RF address. RTL8256 uses "register mode control bit" (Reg00[12], Reg00[10])
- * to access register addresses bigger than 0xf. See "Appendix-4 in PHY Configuration
- * programming guide" for more details.
- * Thus, we define a sub-function for RTL8256 register address conversion
- * ===========================================================
- * Register Mode RegCTL[1] RegCTL[0] Note
- * (Reg00[12]) (Reg00[10])
- * ===========================================================
- * Reg_Mode0 0 x Reg 0 ~15(0x0 ~ 0xf)
- * ------------------------------------------------------------------
- * Reg_Mode1 1 0 Reg 16 ~30(0x1 ~ 0xf)
- * ------------------------------------------------------------------
- * Reg_Mode2 1 1 Reg 31 ~ 45(0x1 ~ 0xf)
- * ------------------------------------------------------------------
- *
- * 2008/09/02 MH Add 92S RF definition
- *
- *
- *
-*/
-static void
-phy_RFSerialWrite(
- struct adapter *Adapter,
- u32 Offset,
- u32 Data
- )
-{
- u32 DataAndAddr = 0;
- struct hal_data_8188e *pHalData = &Adapter->haldata;
- struct bb_reg_def *pPhyReg = &pHalData->PHYRegDef;
- u32 NewOffset;
-
-	/* 2009/06/17 MH We cannot execute IO in power save or other accident modes. */
-
- Offset &= 0xff;
-
- /* */
- /* Switch page for 8256 RF IC */
- /* */
- NewOffset = Offset;
-
- /* */
- /* Put write addr in [5:0] and write data in [31:16] */
- /* */
- DataAndAddr = ((NewOffset << 20) | (Data & 0x000fffff)) & 0x0fffffff; /* T65 RF */
-
- /* */
- /* Write Operation */
- /* */
- rtl8188e_PHY_SetBBReg(Adapter, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);
-}
-
-/**
-* Function: PHY_QueryRFReg
-*
-* Overview: Query "Specific bits" to RF register (page 8~)
-*
-* Input:
-* struct adapter *Adapter,
-* u32 RegAddr, The target address to be read
-* u32 BitMask The target bit position in the target address
-* to be read
-*
-* Output: None
-* Return: u32 Readback value
-* Note: This function is equal to "GetRFRegSetting" in PHY programming guide
-*/
-u32 rtl8188e_PHY_QueryRFReg(struct adapter *Adapter, u32 RegAddr, u32 BitMask)
-{
- u32 Original_Value, Readback_Value, BitShift;
-
- Original_Value = phy_RFSerialRead(Adapter, RegAddr);
-
- BitShift = phy_calculate_bit_shift(BitMask);
- Readback_Value = (Original_Value & BitMask) >> BitShift;
- return Readback_Value;
-}
-
-/**
-* Function: PHY_SetRFReg
-*
-* Overview: Write "Specific bits" to RF register (page 8~)
-*
-* Input:
-* struct adapter *Adapter,
-* u32 RegAddr, The target address to be modified
-* u32 BitMask The target bit position in the target address
-* to be modified
-* u32 Data The new register Data in the target bit position
-* of the target address
-*
-* Output: None
-* Return: None
-* Note: This function is equal to "PutRFRegSetting" in PHY programming guide
-*/
-void
-rtl8188e_PHY_SetRFReg(
- struct adapter *Adapter,
- u32 RegAddr,
- u32 BitMask,
- u32 Data
- )
-{
- u32 Original_Value, BitShift;
-
- /* RF data is 12 bits only */
- if (BitMask != bRFRegOffsetMask) {
- Original_Value = phy_RFSerialRead(Adapter, RegAddr);
- BitShift = phy_calculate_bit_shift(BitMask);
- Data = ((Original_Value & (~BitMask)) | (Data << BitShift));
- }
-
- phy_RFSerialWrite(Adapter, RegAddr, Data);
-}
-
-/* */
-/* 3. Initial MAC/BB/RF config by reading MAC/BB/RF txt. */
-/* */
-
-/*-----------------------------------------------------------------------------
- * Function: PHY_MACConfig8192C
- *
- * Overview: Config MAC by header file or parameter file.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 08/12/2008 MHC Create Version 0.
- *
- *---------------------------------------------------------------------------*/
-int PHY_MACConfig8188E(struct adapter *Adapter)
-{
- struct hal_data_8188e *pHalData = &Adapter->haldata;
- int err;
-
- /* */
- /* Config MAC */
- /* */
- err = ODM_ReadAndConfig_MAC_REG_8188E(&pHalData->odmpriv);
-
- /* 2010.07.13 AMPDU aggregation number B */
- rtw_write16(Adapter, REG_MAX_AGGR_NUM, MAX_AGGR_NUM);
-
- return err;
-}
-
-/**
-* Function: phy_InitBBRFRegisterDefinition
-*
-* Overview: Initialize Register definition offset for Radio Path A/B/C/D
-*
-* Input:
-* struct adapter *Adapter,
-*
-* Output: None
-* Return: None
-* Note: The initialization value is constant and it should never be changed
-*/
-static void
-phy_InitBBRFRegisterDefinition(
- struct adapter *Adapter
-)
-{
- struct hal_data_8188e *pHalData = &Adapter->haldata;
-
-	/* RF Interface Software Control */
- pHalData->PHYRegDef.rfintfs = rFPGA0_XAB_RFInterfaceSW; /* 16 LSBs if read 32-bit from 0x870 */
-
- /* RF Interface Readback Value */
- pHalData->PHYRegDef.rfintfi = rFPGA0_XAB_RFInterfaceRB; /* 16 LSBs if read 32-bit from 0x8E0 */
-
- /* RF Interface Output (and Enable) */
- pHalData->PHYRegDef.rfintfo = rFPGA0_XA_RFInterfaceOE; /* 16 LSBs if read 32-bit from 0x860 */
-
- /* RF Interface (Output and) Enable */
- pHalData->PHYRegDef.rfintfe = rFPGA0_XA_RFInterfaceOE; /* 16 MSBs if read 32-bit from 0x860 (16-bit for 0x862) */
-
- /* Addr of LSSI. Write RF register by driver */
- pHalData->PHYRegDef.rf3wireOffset = rFPGA0_XA_LSSIParameter; /* LSSI Parameter */
-
- /* RF parameter */
- pHalData->PHYRegDef.rfLSSI_Select = rFPGA0_XAB_RFParameter; /* BB Band Select */
-
-	/* Tx AGC Gain Stage (same for all paths. Should we remove this?) */
- pHalData->PHYRegDef.rfTxGainStage = rFPGA0_TxGainStage; /* Tx gain stage */
-
- /* Transceiver A~D HSSI Parameter-1 */
- pHalData->PHYRegDef.rfHSSIPara1 = rFPGA0_XA_HSSIParameter1; /* wire control parameter1 */
-
- /* Transceiver A~D HSSI Parameter-2 */
- pHalData->PHYRegDef.rfHSSIPara2 = rFPGA0_XA_HSSIParameter2; /* wire control parameter2 */
-
- /* RF switch Control */
- pHalData->PHYRegDef.rfSwitchControl = rFPGA0_XAB_SwitchControl; /* TR/Ant switch control */
-
- /* AGC control 1 */
- pHalData->PHYRegDef.rfAGCControl1 = rOFDM0_XAAGCCore1;
-
- /* AGC control 2 */
- pHalData->PHYRegDef.rfAGCControl2 = rOFDM0_XAAGCCore2;
-
- /* RX AFE control 1 */
- pHalData->PHYRegDef.rfRxIQImbalance = rOFDM0_XARxIQImbalance;
-
- /* RX AFE control 1 */
- pHalData->PHYRegDef.rfRxAFE = rOFDM0_XARxAFE;
-
- /* Tx AFE control 1 */
- pHalData->PHYRegDef.rfTxIQImbalance = rOFDM0_XATxIQImbalance;
-
- /* Tx AFE control 2 */
- pHalData->PHYRegDef.rfTxAFE = rOFDM0_XATxAFE;
-
- /* Transceiver LSSI Readback SI mode */
- pHalData->PHYRegDef.rfLSSIReadBack = rFPGA0_XA_LSSIReadBack;
-
- /* Transceiver LSSI Readback PI mode */
- pHalData->PHYRegDef.rfLSSIReadBackPi = TransceiverA_HSPI_Readback;
-}
-
-void storePwrIndexDiffRateOffset(struct adapter *Adapter, u32 RegAddr, u32 BitMask, u32 Data)
-{
- struct hal_data_8188e *pHalData = &Adapter->haldata;
-
- if (RegAddr == rTxAGC_A_Rate18_06)
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][0] = Data;
- if (RegAddr == rTxAGC_A_Rate54_24)
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][1] = Data;
- if (RegAddr == rTxAGC_A_CCK1_Mcs32)
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][6] = Data;
- if (RegAddr == rTxAGC_B_CCK11_A_CCK2_11 && BitMask == 0xffffff00)
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][7] = Data;
- if (RegAddr == rTxAGC_A_Mcs03_Mcs00)
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][2] = Data;
- if (RegAddr == rTxAGC_A_Mcs07_Mcs04)
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][3] = Data;
- if (RegAddr == rTxAGC_A_Mcs11_Mcs08)
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][4] = Data;
- if (RegAddr == rTxAGC_A_Mcs15_Mcs12) {
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][5] = Data;
- pHalData->pwrGroupCnt++;
- }
- if (RegAddr == rTxAGC_B_Rate18_06)
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][8] = Data;
- if (RegAddr == rTxAGC_B_Rate54_24)
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][9] = Data;
- if (RegAddr == rTxAGC_B_CCK1_55_Mcs32)
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][14] = Data;
- if (RegAddr == rTxAGC_B_CCK11_A_CCK2_11 && BitMask == 0x000000ff)
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][15] = Data;
- if (RegAddr == rTxAGC_B_Mcs03_Mcs00)
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][10] = Data;
- if (RegAddr == rTxAGC_B_Mcs07_Mcs04)
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][11] = Data;
- if (RegAddr == rTxAGC_B_Mcs11_Mcs08)
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][12] = Data;
- if (RegAddr == rTxAGC_B_Mcs15_Mcs12)
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][13] = Data;
-}
-
-static int phy_BB8188E_Config_ParaFile(struct adapter *Adapter)
-{
- struct eeprom_priv *pEEPROM = &Adapter->eeprompriv;
- struct hal_data_8188e *pHalData = &Adapter->haldata;
- int err;
-
- /* */
- /* 1. Read PHY_REG.TXT BB INIT!! */
- /* We will separate as 88C / 92C according to chip version */
- /* */
- err = ODM_ReadAndConfig_PHY_REG_1T_8188E(&pHalData->odmpriv);
- if (err)
- return err;
-
- /* 2. If EEPROM or EFUSE autoload OK, We must config by PHY_REG_PG.txt */
- if (!pEEPROM->bautoload_fail_flag) {
- pHalData->pwrGroupCnt = 0;
- ODM_ReadAndConfig_PHY_REG_PG_8188E(&pHalData->odmpriv);
- }
-
- /* 3. BB AGC table Initialization */
- err = ODM_ReadAndConfig_AGC_TAB_1T_8188E(&pHalData->odmpriv);
- if (err)
- return err;
-
- return 0;
-}
-
-int
-PHY_BBConfig8188E(
- struct adapter *Adapter
- )
-{
- struct hal_data_8188e *pHalData = &Adapter->haldata;
- u16 RegVal;
- u8 CrystalCap;
- int err;
-
- phy_InitBBRFRegisterDefinition(Adapter);
-
- /* Enable BB and RF */
- err = rtw_read16(Adapter, REG_SYS_FUNC_EN, &RegVal);
- if (err)
- return err;
-
- rtw_write16(Adapter, REG_SYS_FUNC_EN, (u16)(RegVal | BIT(13) | BIT(0) | BIT(1)));
-
- /* 20090923 Joseph: Advised by Steven and Jenyu. Power sequence before init RF. */
-
- rtw_write8(Adapter, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB);
-
- rtw_write8(Adapter, REG_SYS_FUNC_EN, FEN_USBA | FEN_USBD | FEN_BB_GLB_RSTn | FEN_BBRSTB);
-
- /* Config BB and AGC */
- err = phy_BB8188E_Config_ParaFile(Adapter);
-
- /* write 0x24[16:11] = 0x24[22:17] = CrystalCap */
- CrystalCap = pHalData->CrystalCap & 0x3F;
- rtl8188e_PHY_SetBBReg(Adapter, REG_AFE_XTAL_CTRL, 0x7ff800, (CrystalCap | (CrystalCap << 6)));
-
- return err;
-}
-
-static void getTxPowerIndex88E(struct adapter *Adapter, u8 channel, u8 *cckPowerLevel,
- u8 *ofdmPowerLevel, u8 *BW20PowerLevel,
- u8 *BW40PowerLevel)
-{
- struct hal_data_8188e *pHalData = &Adapter->haldata;
- u8 index = (channel - 1);
-
- /* 1. CCK */
- cckPowerLevel[RF_PATH_A] = pHalData->Index24G_CCK_Base[index];
- /* 2. OFDM */
- ofdmPowerLevel[RF_PATH_A] = pHalData->Index24G_BW40_Base[index] +
- pHalData->OFDM_24G_Diff[RF_PATH_A];
- /* 1. BW20 */
- BW20PowerLevel[RF_PATH_A] = pHalData->Index24G_BW40_Base[index] +
- pHalData->BW20_24G_Diff[RF_PATH_A];
- /* 2. BW40 */
- BW40PowerLevel[RF_PATH_A] = pHalData->Index24G_BW40_Base[index];
-}
-
-/*-----------------------------------------------------------------------------
- * Function: SetTxPowerLevel8190()
- *
- * Overview: This function is exported to the "HalCommon" module
- * We must consider RF path later!!!!!!!
- *
- * Input: struct adapter *Adapter
- * u8 channel
- *
- * Output: NONE
- *
- * Return: NONE
- * 2008/11/04 MHC We remove EEPROM_93C56.
- * We need to move CCX related code to an independent file.
- * 2009/01/21 MHC Support new EEPROM format from SD3 requirement.
- *
- *---------------------------------------------------------------------------*/
-void
-PHY_SetTxPowerLevel8188E(
- struct adapter *Adapter,
- u8 channel
- )
-{
- u8 cckPowerLevel[MAX_TX_COUNT] = {0};
- u8 ofdmPowerLevel[MAX_TX_COUNT] = {0};/* [0]:RF-A, [1]:RF-B */
- u8 BW20PowerLevel[MAX_TX_COUNT] = {0};
- u8 BW40PowerLevel[MAX_TX_COUNT] = {0};
-
- getTxPowerIndex88E(Adapter, channel, &cckPowerLevel[0], &ofdmPowerLevel[0], &BW20PowerLevel[0], &BW40PowerLevel[0]);
-
- rtl8188e_PHY_RF6052SetCckTxPower(Adapter, &cckPowerLevel[0]);
- rtl8188e_PHY_RF6052SetOFDMTxPower(Adapter, &ofdmPowerLevel[0], &BW20PowerLevel[0], &BW40PowerLevel[0], channel);
-}
-
-/*-----------------------------------------------------------------------------
- * Function: PHY_SetBWModeCallback8192C()
- *
- * Overview: Timer callback function for SetBWMode
- *
- * Input: PRT_TIMER pTimer
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Note: (1) We do not take j mode into consideration now
- * (2) Will the two workitems "switch channel" and "switch channel bandwidth" run
- * concurrently?
- *---------------------------------------------------------------------------*/
-static void
-_PHY_SetBWMode92C(
- struct adapter *Adapter
-)
-{
- struct hal_data_8188e *pHalData = &Adapter->haldata;
- u8 regBwOpMode;
- u8 regRRSR_RSC;
- int res;
-
- if (Adapter->bDriverStopped)
- return;
-
- /* 3 */
- /* 3<1>Set MAC register */
- /* 3 */
-
- res = rtw_read8(Adapter, REG_BWOPMODE, &regBwOpMode);
- if (res)
- return;
-
- res = rtw_read8(Adapter, REG_RRSR + 2, &regRRSR_RSC);
- if (res)
- return;
-
- switch (pHalData->CurrentChannelBW) {
- case HT_CHANNEL_WIDTH_20:
- regBwOpMode |= BW_OPMODE_20MHZ;
-		/* 2007/02/07 Mark by Emily because we have not verified whether this register works */
- rtw_write8(Adapter, REG_BWOPMODE, regBwOpMode);
- break;
- case HT_CHANNEL_WIDTH_40:
- regBwOpMode &= ~BW_OPMODE_20MHZ;
-		/* 2007/02/07 Mark by Emily because we have not verified whether this register works */
- rtw_write8(Adapter, REG_BWOPMODE, regBwOpMode);
- regRRSR_RSC = (regRRSR_RSC & 0x90) | (pHalData->nCur40MhzPrimeSC << 5);
- rtw_write8(Adapter, REG_RRSR + 2, regRRSR_RSC);
- break;
- default:
- break;
- }
-
- /* 3 */
- /* 3 <2>Set PHY related register */
- /* 3 */
- switch (pHalData->CurrentChannelBW) {
- /* 20 MHz channel*/
- case HT_CHANNEL_WIDTH_20:
- rtl8188e_PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bRFMOD, 0x0);
- rtl8188e_PHY_SetBBReg(Adapter, rFPGA1_RFMOD, bRFMOD, 0x0);
- break;
- /* 40 MHz channel*/
- case HT_CHANNEL_WIDTH_40:
- rtl8188e_PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bRFMOD, 0x1);
- rtl8188e_PHY_SetBBReg(Adapter, rFPGA1_RFMOD, bRFMOD, 0x1);
- /* Set Control channel to upper or lower. These settings are required only for 40MHz */
- rtl8188e_PHY_SetBBReg(Adapter, rCCK0_System, bCCKSideBand, (pHalData->nCur40MhzPrimeSC >> 1));
- rtl8188e_PHY_SetBBReg(Adapter, rOFDM1_LSTF, 0xC00, pHalData->nCur40MhzPrimeSC);
- rtl8188e_PHY_SetBBReg(Adapter, 0x818, (BIT(26) | BIT(27)),
- (pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
- break;
- default:
- break;
- }
- /* Skip over setting of J-mode in BB register here. Default value is "None J mode". Emily 20070315 */
-
- rtl8188e_PHY_RF6052SetBandwidth(Adapter, pHalData->CurrentChannelBW);
-}
-
- /*-----------------------------------------------------------------------------
- * Function: SetBWMode8190Pci()
- *
- * Overview: This function is exported to the "HalCommon" module
- *
- * Input: struct adapter *Adapter
- * enum ht_channel_width Bandwidth 20M or 40M
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Note: We do not take j mode into consideration now
- *---------------------------------------------------------------------------*/
-void PHY_SetBWMode8188E(struct adapter *Adapter, enum ht_channel_width Bandwidth, /* 20M or 40M */
- unsigned char Offset) /* Upper, Lower, or Don't care */
-{
- struct hal_data_8188e *pHalData = &Adapter->haldata;
- enum ht_channel_width tmpBW = pHalData->CurrentChannelBW;
-
- pHalData->CurrentChannelBW = Bandwidth;
-
- pHalData->nCur40MhzPrimeSC = Offset;
-
- if ((!Adapter->bDriverStopped) && (!Adapter->bSurpriseRemoved))
- _PHY_SetBWMode92C(Adapter);
- else
- pHalData->CurrentChannelBW = tmpBW;
-}
-
-static void _PHY_SwChnl8192C(struct adapter *Adapter, u8 channel)
-{
- u32 param1, param2;
- struct hal_data_8188e *pHalData = &Adapter->haldata;
-
- /* s1. pre common command - CmdID_SetTxPowerLevel */
- PHY_SetTxPowerLevel8188E(Adapter, channel);
-
- /* s2. RF dependent command - CmdID_RF_WriteReg, param1=RF_CHNLBW, param2=channel */
- param1 = RF_CHNLBW;
- param2 = channel;
- pHalData->RfRegChnlVal = ((pHalData->RfRegChnlVal & 0xfffffc00) | param2);
- rtl8188e_PHY_SetRFReg(Adapter, param1, bRFRegOffsetMask, pHalData->RfRegChnlVal);
-}
-
-void PHY_SwChnl8188E(struct adapter *Adapter, u8 channel)
-{
- /* Call after initialization */
- struct hal_data_8188e *pHalData = &Adapter->haldata;
-
- if (channel == 0)
- channel = 1;
-
- if ((!Adapter->bDriverStopped) && (!Adapter->bSurpriseRemoved)) {
- pHalData->CurrentChannel = channel;
- _PHY_SwChnl8192C(Adapter, channel);
- }
-}
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c b/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c
deleted file mode 100644
index 1988fb6e780a..000000000000
--- a/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c
+++ /dev/null
@@ -1,405 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-/******************************************************************************
- *
- *
- * Module: rtl8192c_rf6052.c ( Source C File)
- *
- * Note: Provide RF 6052 series relative API.
- *
- * Function:
- *
- * Export:
- *
- * Abbrev:
- *
- * History:
- * Data Who Remark
- *
- * 09/25/2008 MHC Create initial version.
- * 11/05/2008 MHC Add API for tx power setting.
- *
- *
-******************************************************************************/
-
-#define _RTL8188E_RF6052_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/rtl8188e_hal.h"
-
-/*-----------------------------------------------------------------------------
- * Function: PHY_RF6052SetBandwidth()
- *
- * Overview: This function is called by SetBWModeCallback8190Pci() only
- *
- * Input: struct adapter *Adapter
- * WIRELESS_BANDWIDTH_E Bandwidth 20M or 40M
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Note: For RF type 0222D
- *---------------------------------------------------------------------------*/
-void rtl8188e_PHY_RF6052SetBandwidth(struct adapter *Adapter,
- enum ht_channel_width Bandwidth)
-{
- struct hal_data_8188e *pHalData = &Adapter->haldata;
-
- switch (Bandwidth) {
- case HT_CHANNEL_WIDTH_20:
- pHalData->RfRegChnlVal = ((pHalData->RfRegChnlVal & 0xfffff3ff) | BIT(10) | BIT(11));
- rtl8188e_PHY_SetRFReg(Adapter, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal);
- break;
- case HT_CHANNEL_WIDTH_40:
- pHalData->RfRegChnlVal = ((pHalData->RfRegChnlVal & 0xfffff3ff) | BIT(10));
- rtl8188e_PHY_SetRFReg(Adapter, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal);
- break;
- default:
- break;
- }
-}
-
-/*-----------------------------------------------------------------------------
- * Function: PHY_RF6052SetCckTxPower
- *
- * Overview:
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 11/05/2008 MHC Simulate 8192series..
- *
- *---------------------------------------------------------------------------*/
-
-void
-rtl8188e_PHY_RF6052SetCckTxPower(
- struct adapter *Adapter,
- u8 *pPowerlevel)
-{
- struct hal_data_8188e *pHalData = &Adapter->haldata;
- struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
- u32 TxAGC[2] = {0, 0}, tmpval = 0, pwrtrac_value;
- u8 idx1, idx2;
- u8 *ptr;
- u8 direction;
-
- if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
- TxAGC[RF_PATH_A] = 0x3f3f3f3f;
- TxAGC[RF_PATH_B] = 0x3f3f3f3f;
-
- for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
- TxAGC[idx1] =
- pPowerlevel[idx1] | (pPowerlevel[idx1] << 8) |
- (pPowerlevel[idx1] << 16) | (pPowerlevel[idx1] << 24);
- }
- } else {
- for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
- TxAGC[idx1] =
- pPowerlevel[idx1] | (pPowerlevel[idx1] << 8) |
- (pPowerlevel[idx1] << 16) | (pPowerlevel[idx1] << 24);
- }
- if (pHalData->EEPROMRegulatory == 0) {
- tmpval = (pHalData->MCSTxPowerLevelOriginalOffset[0][6]) +
- (pHalData->MCSTxPowerLevelOriginalOffset[0][7] << 8);
- TxAGC[RF_PATH_A] += tmpval;
-
- tmpval = (pHalData->MCSTxPowerLevelOriginalOffset[0][14]) +
- (pHalData->MCSTxPowerLevelOriginalOffset[0][15] << 24);
- TxAGC[RF_PATH_B] += tmpval;
- }
- }
- for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
- ptr = (u8 *)(&TxAGC[idx1]);
- for (idx2 = 0; idx2 < 4; idx2++) {
- if (*ptr > RF6052_MAX_TX_PWR)
- *ptr = RF6052_MAX_TX_PWR;
- ptr++;
- }
- }
- ODM_TxPwrTrackAdjust88E(&pHalData->odmpriv, 1, &direction, &pwrtrac_value);
-
- if (direction == 1) {
- /* Increase TX power */
- TxAGC[0] += pwrtrac_value;
- TxAGC[1] += pwrtrac_value;
- } else if (direction == 2) {
- /* Decrease TX power */
- TxAGC[0] -= pwrtrac_value;
- TxAGC[1] -= pwrtrac_value;
- }
-
- /* rf-A cck tx power */
- tmpval = TxAGC[RF_PATH_A] & 0xff;
- rtl8188e_PHY_SetBBReg(Adapter, rTxAGC_A_CCK1_Mcs32, bMaskByte1, tmpval);
- tmpval = TxAGC[RF_PATH_A] >> 8;
- rtl8188e_PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
-
- /* rf-B cck tx power */
- tmpval = TxAGC[RF_PATH_B] >> 24;
- rtl8188e_PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, bMaskByte0, tmpval);
- tmpval = TxAGC[RF_PATH_B] & 0x00ffffff;
- rtl8188e_PHY_SetBBReg(Adapter, rTxAGC_B_CCK1_55_Mcs32, 0xffffff00, tmpval);
-} /* PHY_RF6052SetCckTxPower */
-
-/* */
-/* powerbase0 for OFDM rates */
-/* powerbase1 for HT MCS rates */
-/* */
-static void getpowerbase88e(struct adapter *Adapter, u8 *pPowerLevelOFDM,
- u8 *pPowerLevelBW20, u8 *pPowerLevelBW40, u8 Channel, u32 *OfdmBase, u32 *MCSBase)
-{
- struct hal_data_8188e *pHalData = &Adapter->haldata;
- u32 powerBase0, powerBase1;
- u8 i;
-
- for (i = 0; i < 2; i++) {
- powerBase0 = pPowerLevelOFDM[i];
-
- powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) | (powerBase0 << 8) | powerBase0;
- *(OfdmBase + i) = powerBase0;
- }
-
- /* Check HT20 to HT40 diff */
- if (pHalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
- powerBase1 = pPowerLevelBW20[0];
- else
- powerBase1 = pPowerLevelBW40[0];
- powerBase1 = (powerBase1 << 24) | (powerBase1 << 16) | (powerBase1 << 8) | powerBase1;
- *MCSBase = powerBase1;
-}
-
-static void get_rx_power_val_by_reg(struct adapter *Adapter, u8 Channel,
- u8 index, u32 *powerBase0, u32 *powerBase1,
- u32 *pOutWriteVal)
-{
- struct hal_data_8188e *pHalData = &Adapter->haldata;
- u8 i, chnlGroup = 0, pwr_diff_limit[4], customer_pwr_limit;
- s8 pwr_diff = 0;
- u32 writeVal, customer_limit, rf;
- u8 Regulatory = pHalData->EEPROMRegulatory;
-
- /* Index 0 & 1= legacy OFDM, 2-5=HT_MCS rate */
-
- for (rf = 0; rf < 2; rf++) {
- switch (Regulatory) {
- case 0: /* Realtek better performance */
- /* increase power diff defined by Realtek for large power */
- chnlGroup = 0;
- writeVal = pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index + (rf ? 8 : 0)] +
- ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
- break;
- case 1: /* Realtek regulatory */
- /* increase power diff defined by Realtek for regulatory */
- if (pHalData->pwrGroupCnt == 1)
- chnlGroup = 0;
- if (pHalData->pwrGroupCnt >= MAX_PG_GROUP) {
- if (Channel < 3) /* Channel 1-2 */
- chnlGroup = 0;
- else if (Channel < 6) /* Channel 3-5 */
- chnlGroup = 1;
- else if (Channel < 9) /* Channel 6-8 */
- chnlGroup = 2;
- else if (Channel < 12) /* Channel 9-11 */
- chnlGroup = 3;
- else if (Channel < 14) /* Channel 12-13 */
- chnlGroup = 4;
- else if (Channel == 14) /* Channel 14 */
- chnlGroup = 5;
- }
- writeVal = pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index + (rf ? 8 : 0)] +
- ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
- break;
- case 2: /* Better regulatory */
- /* don't increase any power diff */
- writeVal = ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
- break;
- case 3: /* Customer defined power diff. */
- /* increase power diff defined by customer. */
- chnlGroup = 0;
-
- if (index < 2)
- pwr_diff = pHalData->TxPwrLegacyHtDiff[rf][Channel - 1];
- else if (pHalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
- pwr_diff = pHalData->TxPwrHt20Diff[rf][Channel - 1];
-
- if (pHalData->CurrentChannelBW == HT_CHANNEL_WIDTH_40)
- customer_pwr_limit = pHalData->PwrGroupHT40[rf][Channel - 1];
- else
- customer_pwr_limit = pHalData->PwrGroupHT20[rf][Channel - 1];
-
- if (pwr_diff >= customer_pwr_limit)
- pwr_diff = 0;
- else
- pwr_diff = customer_pwr_limit - pwr_diff;
-
- for (i = 0; i < 4; i++) {
- pwr_diff_limit[i] = (u8)((pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index + (rf ? 8 : 0)] & (0x7f << (i * 8))) >> (i * 8));
-
- if (pwr_diff_limit[i] > pwr_diff)
- pwr_diff_limit[i] = pwr_diff;
- }
- customer_limit = (pwr_diff_limit[3] << 24) | (pwr_diff_limit[2] << 16) |
- (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]);
- writeVal = customer_limit + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
- break;
- default:
- chnlGroup = 0;
- writeVal = pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index + (rf ? 8 : 0)] +
- ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
- break;
- }
-
- *(pOutWriteVal + rf) = writeVal;
- }
-}
-static void writeOFDMPowerReg88E(struct adapter *Adapter, u8 index, u32 *pValue)
-{
- u16 regoffset_a[6] = {
- rTxAGC_A_Rate18_06, rTxAGC_A_Rate54_24,
- rTxAGC_A_Mcs03_Mcs00, rTxAGC_A_Mcs07_Mcs04,
- rTxAGC_A_Mcs11_Mcs08, rTxAGC_A_Mcs15_Mcs12};
- u16 regoffset_b[6] = {
- rTxAGC_B_Rate18_06, rTxAGC_B_Rate54_24,
- rTxAGC_B_Mcs03_Mcs00, rTxAGC_B_Mcs07_Mcs04,
- rTxAGC_B_Mcs11_Mcs08, rTxAGC_B_Mcs15_Mcs12};
- u8 i, rf, pwr_val[4];
- u32 writeVal;
- u16 regoffset;
-
- for (rf = 0; rf < 2; rf++) {
- writeVal = pValue[rf];
- for (i = 0; i < 4; i++) {
- pwr_val[i] = (u8)((writeVal & (0x7f << (i * 8))) >> (i * 8));
- if (pwr_val[i] > RF6052_MAX_TX_PWR)
- pwr_val[i] = RF6052_MAX_TX_PWR;
- }
- writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) | (pwr_val[1] << 8) | pwr_val[0];
-
- if (rf == 0)
- regoffset = regoffset_a[index];
- else
- regoffset = regoffset_b[index];
-
- rtl8188e_PHY_SetBBReg(Adapter, regoffset, bMaskDWord, writeVal);
-
- /* 201005115 Joseph: Set Tx Power diff for Tx power training mechanism. */
- if (regoffset == rTxAGC_A_Mcs07_Mcs04 || regoffset == rTxAGC_B_Mcs07_Mcs04) {
- writeVal = pwr_val[3];
- if (regoffset == rTxAGC_A_Mcs15_Mcs12 || regoffset == rTxAGC_A_Mcs07_Mcs04)
- regoffset = 0xc90;
- if (regoffset == rTxAGC_B_Mcs15_Mcs12 || regoffset == rTxAGC_B_Mcs07_Mcs04)
- regoffset = 0xc98;
- for (i = 0; i < 3; i++) {
- if (i != 2)
- writeVal = (writeVal > 8) ? (writeVal - 8) : 0;
- else
- writeVal = (writeVal > 6) ? (writeVal - 6) : 0;
- rtw_write8(Adapter, (u32)(regoffset + i), (u8)writeVal);
- }
- }
- }
-}
-
-/*-----------------------------------------------------------------------------
- * Function: PHY_RF6052SetOFDMTxPower
- *
- * Overview: For legacy and HT OFDM, we must read the EEPROM TX power index for
- * different channels and read the original value in the TX power register area from
- * 0xe00. We add the offset to the original value to get the correct tx pwr.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 11/05/2008 MHC Simulate 8192 series method.
- * 01/06/2009 MHC 1. Prevent Path B tx power overflow or underflow due to
- * A/B pwr difference or legacy/HT pwr diff.
- * 2. We are concerned with the path B legacy/HT OFDM difference.
- * 01/22/2009 MHC Support new EPRO format from SD3.
- *
- *---------------------------------------------------------------------------*/
-
-void
-rtl8188e_PHY_RF6052SetOFDMTxPower(
- struct adapter *Adapter,
- u8 *pPowerLevelOFDM,
- u8 *pPowerLevelBW20,
- u8 *pPowerLevelBW40,
- u8 Channel)
-{
- struct hal_data_8188e *pHalData = &Adapter->haldata;
- u32 writeVal[2], powerBase0[2], powerBase1[2], pwrtrac_value;
- u8 direction;
- u8 index = 0;
-
- getpowerbase88e(Adapter, pPowerLevelOFDM, pPowerLevelBW20, pPowerLevelBW40, Channel, &powerBase0[0], &powerBase1[0]);
-
- /* 2012/04/23 MH According to power tracking value, we need to revise OFDM tx power. */
- /* This is used to fix unstable power tracking mode. */
- ODM_TxPwrTrackAdjust88E(&pHalData->odmpriv, 0, &direction, &pwrtrac_value);
-
- for (index = 0; index < 6; index++) {
- get_rx_power_val_by_reg(Adapter, Channel, index,
- &powerBase0[0], &powerBase1[0],
- &writeVal[0]);
-
- if (direction == 1) {
- writeVal[0] += pwrtrac_value;
- writeVal[1] += pwrtrac_value;
- } else if (direction == 2) {
- writeVal[0] -= pwrtrac_value;
- writeVal[1] -= pwrtrac_value;
- }
- writeOFDMPowerReg88E(Adapter, index, &writeVal[0]);
- }
-}
-
-int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
-{
- struct bb_reg_def *pPhyReg;
- struct hal_data_8188e *pHalData = &Adapter->haldata;
- u32 u4RegValue = 0;
- int err;
-
- /* Initialize RF */
-
- pPhyReg = &pHalData->PHYRegDef;
-
- /*----Store original RFENV control type----*/
- u4RegValue = rtl8188e_PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV);
-
- /*----Set RF_ENV enable----*/
- rtl8188e_PHY_SetBBReg(Adapter, pPhyReg->rfintfe, bRFSI_RFENV << 16, 0x1);
- udelay(1);/* PlatformStallExecution(1); */
-
- /*----Set RF_ENV output high----*/
- rtl8188e_PHY_SetBBReg(Adapter, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
- udelay(1);/* PlatformStallExecution(1); */
-
- /* Set bit number of Address and Data for RF register */
- rtl8188e_PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); /* Set 1 to 4 bits for 8255 */
- udelay(1);/* PlatformStallExecution(1); */
-
- rtl8188e_PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); /* Set 0 to 12 bits for 8255 */
- udelay(1);/* PlatformStallExecution(1); */
-
- /*----Initialize RF from configuration file----*/
- err = ODM_ReadAndConfig_RadioA_1T_8188E(&pHalData->odmpriv);
-
- /*----Restore RFENV control type----*/
- rtl8188e_PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV, u4RegValue);
-
- return err;
-}
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c
deleted file mode 100644
index d1ac2960f1c4..000000000000
--- a/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c
+++ /dev/null
@@ -1,161 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _RTL8188E_REDESC_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/rtl8188e_hal.h"
-
-static void process_rssi(struct adapter *padapter, struct recv_frame *prframe)
-{
- struct rx_pkt_attrib *pattrib = &prframe->attrib;
- struct signal_stat *signal_stat = &padapter->recvpriv.signal_strength_data;
-
- if (signal_stat->update_req) {
- signal_stat->total_num = 0;
- signal_stat->total_val = 0;
- signal_stat->update_req = 0;
- }
-
- signal_stat->total_num++;
- signal_stat->total_val += pattrib->phy_info.SignalStrength;
- signal_stat->avg_val = signal_stat->total_val / signal_stat->total_num;
-} /* Process_UI_RSSI_8192C */
-
-static void process_link_qual(struct adapter *padapter, struct recv_frame *prframe)
-{
- struct rx_pkt_attrib *pattrib;
- struct signal_stat *signal_stat;
-
- if (!prframe || !padapter)
- return;
-
- pattrib = &prframe->attrib;
- signal_stat = &padapter->recvpriv.signal_qual_data;
-
- if (signal_stat->update_req) {
- signal_stat->total_num = 0;
- signal_stat->total_val = 0;
- signal_stat->update_req = 0;
- }
-
- signal_stat->total_num++;
- signal_stat->total_val += pattrib->phy_info.SignalQuality;
- signal_stat->avg_val = signal_stat->total_val / signal_stat->total_num;
-}
-
-static void rtl8188e_process_phy_info(struct adapter *padapter, void *prframe)
-{
- struct recv_frame *precvframe = (struct recv_frame *)prframe;
-
- /* Check RSSI */
- process_rssi(padapter, precvframe);
- /* Check EVM */
- process_link_qual(padapter, precvframe);
-}
-
-void update_recvframe_attrib_88e(struct recv_frame *precvframe, struct recv_stat *prxstat)
-{
- struct rx_pkt_attrib *pattrib = &precvframe->attrib;
- memset(pattrib, 0, sizeof(struct rx_pkt_attrib));
-
- pattrib->crc_err = (le32_to_cpu(prxstat->rxdw0) >> 14) & 0x1;
-
- pattrib->pkt_rpt_type = (le32_to_cpu(prxstat->rxdw3) >> 14) & 0x3;
-
- if (pattrib->pkt_rpt_type == NORMAL_RX) {
- pattrib->pkt_len = le32_to_cpu(prxstat->rxdw0) & 0x00003fff;
- pattrib->icv_err = (le32_to_cpu(prxstat->rxdw0) >> 15) & 0x1;
- pattrib->drvinfo_sz = ((le32_to_cpu(prxstat->rxdw0) >> 16) & 0xf) * 8;
- pattrib->encrypt = (u8)((le32_to_cpu(prxstat->rxdw0) >> 20) & 0x7);
- pattrib->qos = (le32_to_cpu(prxstat->rxdw0) >> 23) & 0x1;
- pattrib->shift_sz = (le32_to_cpu(prxstat->rxdw0) >> 24) & 0x3;
- pattrib->physt = (le32_to_cpu(prxstat->rxdw0) >> 26) & 0x1;
- pattrib->bdecrypted = (le32_to_cpu(prxstat->rxdw0) & BIT(27)) ? 0 : 1;
-
- pattrib->priority = (le32_to_cpu(prxstat->rxdw1) >> 8) & 0xf;
- pattrib->amsdu = (le32_to_cpu(prxstat->rxdw1) >> 13) & 0x1;
- pattrib->mdata = (le32_to_cpu(prxstat->rxdw1) >> 26) & 0x1;
- pattrib->mfrag = (le32_to_cpu(prxstat->rxdw1) >> 27) & 0x1;
-
- pattrib->seq_num = le32_to_cpu(prxstat->rxdw2) & 0x00000fff;
- pattrib->frag_num = (le32_to_cpu(prxstat->rxdw2) >> 12) & 0xf;
-
- pattrib->mcs_rate = le32_to_cpu(prxstat->rxdw3) & 0x3f;
- pattrib->rxht = (le32_to_cpu(prxstat->rxdw3) >> 6) & 0x1;
-
- } else if (pattrib->pkt_rpt_type == TX_REPORT1) { /* CCX */
- pattrib->pkt_len = TX_RPT1_PKT_LEN;
- } else if (pattrib->pkt_rpt_type == TX_REPORT2) {
- pattrib->pkt_len = le32_to_cpu(prxstat->rxdw0) & 0x3FF;
-
- pattrib->MacIDValidEntry[0] = le32_to_cpu(prxstat->rxdw4);
- pattrib->MacIDValidEntry[1] = le32_to_cpu(prxstat->rxdw5);
-
- } else if (pattrib->pkt_rpt_type == HIS_REPORT) {
- pattrib->pkt_len = le32_to_cpu(prxstat->rxdw0) & 0x00003fff;
- }
-}
-
-/*
- * Notice:
- * Before calling this function,
- * precvframe->rx_data should be ready!
- */
-void update_recvframe_phyinfo_88e(struct recv_frame *precvframe, struct phy_stat *pphy_status)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precvframe->rx_data;
- struct adapter *padapter = precvframe->adapter;
- struct rx_pkt_attrib *pattrib = &precvframe->attrib;
- struct hal_data_8188e *pHalData = &padapter->haldata;
- struct phy_info *pPHYInfo = &pattrib->phy_info;
- u8 *wlanhdr = precvframe->rx_data;
- struct odm_per_pkt_info pkt_info;
- u8 *sa = NULL;
- struct sta_priv *pstapriv;
- struct sta_info *psta;
-
- pkt_info.bPacketMatchBSSID = ((!ieee80211_is_ctl(hdr->frame_control)) &&
- !pattrib->icv_err && !pattrib->crc_err &&
- !memcmp(get_hdr_bssid(wlanhdr),
- get_bssid(&padapter->mlmepriv), ETH_ALEN));
-
- pkt_info.bPacketToSelf = pkt_info.bPacketMatchBSSID &&
- ether_addr_equal(ieee80211_get_DA(hdr),
- myid(&padapter->eeprompriv));
-
- pkt_info.bPacketBeacon = pkt_info.bPacketMatchBSSID &&
- ieee80211_is_beacon(hdr->frame_control);
- if (pkt_info.bPacketBeacon) {
- if (check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE))
- sa = padapter->mlmepriv.cur_network.network.MacAddress;
- /* to do Ad-hoc */
- } else {
- sa = ieee80211_get_SA(hdr);
- }
-
- pstapriv = &padapter->stapriv;
- pkt_info.StationID = 0xFF;
- psta = rtw_get_stainfo(pstapriv, sa);
- if (psta)
- pkt_info.StationID = psta->mac_id;
- pkt_info.Rate = pattrib->mcs_rate;
-
- ODM_PhyStatusQuery(&pHalData->odmpriv, pPHYInfo, (u8 *)pphy_status, &(pkt_info), padapter);
-
- precvframe->psta = NULL;
- if (pkt_info.bPacketMatchBSSID &&
- (check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE))) {
- if (psta) {
- precvframe->psta = psta;
- rtl8188e_process_phy_info(padapter, precvframe);
- }
- } else if (pkt_info.bPacketToSelf || pkt_info.bPacketBeacon) {
- if (check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE)) {
- if (psta)
- precvframe->psta = psta;
- }
- rtl8188e_process_phy_info(padapter, precvframe);
- }
-}
diff --git a/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
deleted file mode 100644
index 3ffab4953a5c..000000000000
--- a/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
+++ /dev/null
@@ -1,627 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _RTL8188E_XMIT_C_
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/wifi.h"
-#include "../include/osdep_intf.h"
-#include "../include/usb_ops.h"
-#include "../include/rtl8188e_hal.h"
-
-static void rtl8188eu_cal_txdesc_chksum(struct tx_desc *ptxdesc)
-{
- u16 *usptr = (u16 *)ptxdesc;
- u32 count = 16; /* (32 bytes / 2 bytes per XOR) => 16 times */
- u32 index;
- u16 checksum = 0;
-
- /* Clear first */
- ptxdesc->txdw7 &= cpu_to_le32(0xffff0000);
-
- for (index = 0; index < count; index++)
- checksum = checksum ^ le16_to_cpu(*(__le16 *)(usptr + index));
- ptxdesc->txdw7 |= cpu_to_le32(0x0000ffff & checksum);
-}
-
-/* Description: In normal chip, we should send some packets to Hw which will be used by Fw */
-/* in FW LPS mode. This function fills the Tx descriptor of these packets, then */
-/* Fw can tell Hw to send these packets directly. */
-void rtl8188e_fill_fake_txdesc(struct adapter *adapt, u8 *desc, u32 BufferLen, u8 ispspoll, u8 is_btqosnull)
-{
- struct tx_desc *ptxdesc;
-
- /* Clear all status */
- ptxdesc = (struct tx_desc *)desc;
- memset(desc, 0, TXDESC_SIZE);
-
- /* offset 0 */
- ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG); /* own, bFirstSeg, bLastSeg; */
-
- ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE + OFFSET_SZ) << OFFSET_SHT) & 0x00ff0000); /* 32 bytes for TX Desc */
-
- ptxdesc->txdw0 |= cpu_to_le32(BufferLen & 0x0000ffff); /* Buffer size + command header */
-
- /* offset 4 */
- ptxdesc->txdw1 |= cpu_to_le32((QSLT_MGNT << QSEL_SHT) & 0x00001f00); /* Fixed queue of Mgnt queue */
-
- /* Set NAVUSEHDR to prevent the Ps-poll AID field from being changed to an erroneous value by Hw. */
- if (ispspoll) {
- ptxdesc->txdw1 |= cpu_to_le32(NAVUSEHDR);
- } else {
- ptxdesc->txdw4 |= cpu_to_le32(BIT(7)); /* Hw set sequence number */
- ptxdesc->txdw3 |= cpu_to_le32((8 << 28)); /* set bit3 to 1. Suggested by TimChen. 2009.12.29. */
- }
-
- if (is_btqosnull)
- ptxdesc->txdw2 |= cpu_to_le32(BIT(23)); /* BT NULL */
-
- /* offset 16 */
- ptxdesc->txdw4 |= cpu_to_le32(BIT(8));/* driver uses rate */
-
- /* The USB interface drops the packet if the descriptor checksum isn't correct. */
- /* Using this checksum lets the hardware recover from packet bulk out errors (e.g. cancel URB, bulk out error). */
- rtl8188eu_cal_txdesc_chksum(ptxdesc);
-}
-
-static void fill_txdesc_sectype(struct pkt_attrib *pattrib, struct tx_desc *ptxdesc)
-{
- if ((pattrib->encrypt > 0) && !pattrib->bswenc) {
- switch (pattrib->encrypt) {
- /* SEC_TYPE : 0:NO_ENC,1:WEP40/TKIP,2:WAPI,3:AES */
- case _WEP40_:
- case _WEP104_:
- ptxdesc->txdw1 |= cpu_to_le32((0x01 << SEC_TYPE_SHT) & 0x00c00000);
- ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
- break;
- case _TKIP_:
- case _TKIP_WTMIC_:
- ptxdesc->txdw1 |= cpu_to_le32((0x01 << SEC_TYPE_SHT) & 0x00c00000);
- ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
- break;
- case _AES_:
- ptxdesc->txdw1 |= cpu_to_le32((0x03 << SEC_TYPE_SHT) & 0x00c00000);
- ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
- break;
- case _NO_PRIVACY_:
- default:
- break;
- }
- }
-}
-
-static void fill_txdesc_vcs(struct pkt_attrib *pattrib, __le32 *pdw)
-{
- switch (pattrib->vcs_mode) {
- case RTS_CTS:
- *pdw |= cpu_to_le32(RTS_EN);
- break;
- case CTS_TO_SELF:
- *pdw |= cpu_to_le32(CTS_2_SELF);
- break;
- case NONE_VCS:
- default:
- break;
- }
- if (pattrib->vcs_mode) {
- *pdw |= cpu_to_le32(HW_RTS_EN);
- /* Set RTS BW */
- if (pattrib->ht_en) {
- *pdw |= (pattrib->bwmode & HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(27)) : 0;
-
- if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
- *pdw |= cpu_to_le32((0x01 << 28) & 0x30000000);
- else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
- *pdw |= cpu_to_le32((0x02 << 28) & 0x30000000);
- else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
- *pdw |= 0;
- else
- *pdw |= cpu_to_le32((0x03 << 28) & 0x30000000);
- }
- }
-}
-
-static void fill_txdesc_phy(struct pkt_attrib *pattrib, __le32 *pdw)
-{
- if (pattrib->ht_en) {
- *pdw |= (pattrib->bwmode & HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(25)) : 0;
-
- if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
- *pdw |= cpu_to_le32((0x01 << DATA_SC_SHT) & 0x003f0000);
- else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
- *pdw |= cpu_to_le32((0x02 << DATA_SC_SHT) & 0x003f0000);
- else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
- *pdw |= 0;
- else
- *pdw |= cpu_to_le32((0x03 << DATA_SC_SHT) & 0x003f0000);
- }
-}
-
-static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz)
-{
- uint qsel;
- u8 data_rate, pwr_status, offset;
- struct adapter *adapt = pxmitframe->padapter;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct hal_data_8188e *haldata = &adapt->haldata;
- struct tx_desc *ptxdesc = (struct tx_desc *)pmem;
- struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- memset(ptxdesc, 0, sizeof(struct tx_desc));
-
- /* 4 offset 0 */
- ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);
- ptxdesc->txdw0 |= cpu_to_le32(sz & 0x0000ffff);/* update TXPKTSIZE */
-
- offset = TXDESC_SIZE + OFFSET_SZ;
-
- ptxdesc->txdw0 |= cpu_to_le32(((offset) << OFFSET_SHT) & 0x00ff0000);/* 32 bytes for TX Desc */
-
- if (is_multicast_ether_addr(pattrib->ra))
- ptxdesc->txdw0 |= cpu_to_le32(BMC);
-
- /* pkt_offset, unit:8 bytes padding */
- if (pxmitframe->pkt_offset > 0)
- ptxdesc->txdw1 |= cpu_to_le32((pxmitframe->pkt_offset << 26) & 0x7c000000);
-
- /* driver uses rate */
- ptxdesc->txdw4 |= cpu_to_le32(USERATE);/* rate control always by driver */
-
- if ((pxmitframe->frame_tag & 0x0f) == DATA_FRAMETAG) {
- /* offset 4 */
- ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3F);
-
- qsel = (uint)(pattrib->qsel & 0x0000001f);
- ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);
-
- ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000F0000);
-
- fill_txdesc_sectype(pattrib, ptxdesc);
-
- if (pattrib->ampdu_en) {
- ptxdesc->txdw2 |= cpu_to_le32(AGG_EN);/* AGG EN */
- ptxdesc->txdw6 = cpu_to_le32(0x6666f800);
- } else {
- ptxdesc->txdw2 |= cpu_to_le32(AGG_BK);/* AGG BK */
- }
-
- /* offset 8 */
-
- /* offset 12 */
- ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << SEQ_SHT) & 0x0FFF0000);
-
- /* offset 16 , offset 20 */
- if (pattrib->qos_en)
- ptxdesc->txdw4 |= cpu_to_le32(QOS);/* QoS */
-
- /* offset 20 */
- if (pxmitframe->agg_num > 1)
- ptxdesc->txdw5 |= cpu_to_le32((pxmitframe->agg_num << USB_TXAGG_NUM_SHT) & 0xFF000000);
-
- if ((pattrib->ether_type != 0x888e) &&
- (pattrib->ether_type != 0x0806) &&
- (pattrib->ether_type != 0x88b4) &&
- (pattrib->dhcp_pkt != 1)) {
- /* Non EAP & ARP & DHCP type data packet */
-
- fill_txdesc_vcs(pattrib, &ptxdesc->txdw4);
- fill_txdesc_phy(pattrib, &ptxdesc->txdw4);
-
- ptxdesc->txdw4 |= cpu_to_le32(0x00000008);/* RTS Rate=24M */
- ptxdesc->txdw5 |= cpu_to_le32(0x0001ff00);/* DATA/RTS Rate FB LMT */
-
- if (pattrib->ht_en) {
- if (ODM_RA_GetShortGI_8188E(&haldata->odmpriv, pattrib->mac_id))
- ptxdesc->txdw5 |= cpu_to_le32(SGI);/* SGI */
- }
- data_rate = ODM_RA_GetDecisionRate_8188E(&haldata->odmpriv, pattrib->mac_id);
- ptxdesc->txdw5 |= cpu_to_le32(data_rate & 0x3F);
- pwr_status = ODM_RA_GetHwPwrStatus_8188E(&haldata->odmpriv, pattrib->mac_id);
- ptxdesc->txdw4 |= cpu_to_le32((pwr_status & 0x7) << PWR_STATUS_SHT);
- } else {
- /* EAP data packet and ARP packet and DHCP. */
- /* Use the 1M data rate to send the EAP/ARP packet. */
- /* This may make the handshake smoother. */
- ptxdesc->txdw2 |= cpu_to_le32(AGG_BK);/* AGG BK */
- if (pmlmeinfo->preamble_mode == PREAMBLE_SHORT)
- ptxdesc->txdw4 |= cpu_to_le32(BIT(24));/* DATA_SHORT */
- ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
- }
- } else if ((pxmitframe->frame_tag & 0x0f) == MGNT_FRAMETAG) {
- /* offset 4 */
- ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3f);
-
- qsel = (uint)(pattrib->qsel & 0x0000001f);
- ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);
-
- ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000f0000);
-
- /* offset 8 */
- /* CCX-TXRPT ack for xmit mgmt frames. */
- if (pxmitframe->ack_report)
- ptxdesc->txdw2 |= cpu_to_le32(BIT(19));
-
- /* offset 12 */
- ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << SEQ_SHT) & 0x0FFF0000);
-
- /* offset 20 */
- ptxdesc->txdw5 |= cpu_to_le32(RTY_LMT_EN);/* retry limit enable */
- if (pattrib->retry_ctrl)
- ptxdesc->txdw5 |= cpu_to_le32(0x00180000);/* retry limit = 6 */
- else
- ptxdesc->txdw5 |= cpu_to_le32(0x00300000);/* retry limit = 12 */
-
- ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
- } else if ((pxmitframe->frame_tag & 0x0f) != TXAGG_FRAMETAG) {
- /* offset 4 */
- ptxdesc->txdw1 |= cpu_to_le32((4) & 0x3f);/* CAM_ID(MAC_ID) */
-
- ptxdesc->txdw1 |= cpu_to_le32((6 << RATE_ID_SHT) & 0x000f0000);/* raid */
-
- /* offset 8 */
-
- /* offset 12 */
- ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << SEQ_SHT) & 0x0fff0000);
-
- /* offset 20 */
- ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
- }
-
- /* 2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS. */
- /* (1) The sequence number of each non-Qos frame / broadcast / multicast / */
- /* mgnt frame should be controlled by Hw because Fw will also send null data */
- /* which we cannot control when Fw LPS is enabled. */
- /* --> default enable non-Qos data sequence number. 2010.06.23. by tynli. */
- /* (2) Enable HW SEQ control for beacon packet, because we use Hw beacon. */
- /* (3) Use HW Qos SEQ to control the seq num of Ext port non-Qos packets. */
- /* 2010.06.23. Added by tynli. */
- if (!pattrib->qos_en) {
- ptxdesc->txdw3 |= cpu_to_le32(EN_HWSEQ); /* Hw set sequence number */
- ptxdesc->txdw4 |= cpu_to_le32(HW_SSN); /* Hw set sequence number */
- }
-
- ODM_SetTxAntByTxInfo_88E(&haldata->odmpriv, pmem, pattrib->mac_id);
-
- rtl8188eu_cal_txdesc_chksum(ptxdesc);
- return 0;
-}
-
-/* for non-agg data frame or management frame */
-static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
-{
- s32 ret = _SUCCESS;
- s32 inner_ret = _SUCCESS;
- int t, sz, w_sz, pull = 0;
- u8 *mem_addr;
- u32 ff_hwaddr;
- struct xmit_buf *pxmitbuf = pxmitframe->pxmitbuf;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
- struct security_priv *psecuritypriv = &adapt->securitypriv;
- if ((pxmitframe->frame_tag == DATA_FRAMETAG) &&
- (pxmitframe->attrib.ether_type != 0x0806) &&
- (pxmitframe->attrib.ether_type != 0x888e) &&
- (pxmitframe->attrib.ether_type != 0x88b4) &&
- (pxmitframe->attrib.dhcp_pkt != 1))
- rtw_issue_addbareq_cmd(adapt, pxmitframe);
- mem_addr = pxmitframe->buf_addr;
-
- for (t = 0; t < pattrib->nr_frags; t++) {
- if (inner_ret != _SUCCESS && ret == _SUCCESS)
- ret = _FAIL;
-
- if (t != (pattrib->nr_frags - 1)) {
- sz = pxmitpriv->frag_len;
- sz = sz - 4 - (psecuritypriv->sw_encrypt ? 0 : pattrib->icv_len);
- } else {
- /* no frag */
- sz = pattrib->last_txcmdsz;
- }
-
- pull = update_txdesc(pxmitframe, mem_addr, sz);
-
- if (pull) {
- mem_addr += PACKET_OFFSET_SZ; /* pull txdesc head */
- pxmitframe->buf_addr = mem_addr;
- w_sz = sz + TXDESC_SIZE;
- } else {
- w_sz = sz + TXDESC_SIZE + PACKET_OFFSET_SZ;
- }
- ff_hwaddr = rtw_get_ff_hwaddr(pxmitframe);
-
- inner_ret = rtw_write_port(adapt, ff_hwaddr, w_sz, (unsigned char *)pxmitbuf);
-
- rtw_count_tx_stats(adapt, pxmitframe, sz);
-
- mem_addr += w_sz;
-
- mem_addr = PTR_ALIGN(mem_addr, 4);
- }
-
- rtw_free_xmitframe(pxmitpriv, pxmitframe);
-
- if (ret != _SUCCESS)
- rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_UNKNOWN);
-
- return ret;
-}
-
-static u32 xmitframe_need_length(struct xmit_frame *pxmitframe)
-{
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
-
- u32 len = 0;
-
- /* does not consider fragmentation */
- len = pattrib->hdrlen + pattrib->iv_len +
- SNAP_SIZE + sizeof(u16) +
- pattrib->pktlen +
- ((pattrib->bswenc) ? pattrib->icv_len : 0);
-
- if (pattrib->encrypt == _TKIP_)
- len += 8;
-
- return len;
-}
-
-bool rtl8188eu_xmitframe_complete(struct adapter *adapt)
-{
- struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapt);
- struct xmit_frame *pxmitframe = NULL;
- struct xmit_frame *pfirstframe = NULL;
- struct xmit_buf *pxmitbuf;
-
- /* aggregate variable */
- struct hw_xmit *phwxmit;
- struct sta_info *psta = NULL;
- struct tx_servq *ptxservq = NULL;
- struct list_head *xmitframe_plist = NULL, *xmitframe_phead = NULL;
-
- u32 pbuf; /* next pkt address */
- u32 pbuf_tail; /* last pkt tail */
- u32 len; /* packet length, except TXDESC_SIZE and PKT_OFFSET */
-
- u32 bulksize;
- u8 desc_cnt;
- u32 bulkptr;
-
- /* dump frame variable */
- u32 ff_hwaddr;
-
- if (pdvobjpriv->pusbdev->speed == USB_SPEED_HIGH)
- bulksize = USB_HIGH_SPEED_BULK_SIZE;
- else
- bulksize = USB_FULL_SPEED_BULK_SIZE;
-
- pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
- if (!pxmitbuf)
- return false;
-
- pxmitframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits);
- if (!pxmitframe) {
- /* no more xmit frame, release xmit buffer */
- rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
- return false;
- }
-
- pxmitframe->pxmitbuf = pxmitbuf;
- pxmitframe->buf_addr = pxmitbuf->pbuf;
- pxmitbuf->priv_data = pxmitframe;
-
- pxmitframe->agg_num = 1; /* alloc xmitframe should assign to 1. */
- pxmitframe->pkt_offset = 1; /* first frame of aggregation, reserve offset */
-
- rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
-
- /* always return ndis_packet after rtw_xmitframe_coalesce */
- rtw_xmit_complete(adapt, pxmitframe);
-
- /* 3 2. aggregate same priority and same DA(AP or STA) frames */
- pfirstframe = pxmitframe;
- len = xmitframe_need_length(pfirstframe) + TXDESC_SIZE + (pfirstframe->pkt_offset * PACKET_OFFSET_SZ);
- pbuf_tail = len;
- pbuf = round_up(pbuf_tail, 8);
-
- /* check pkt amount in one bulk */
- desc_cnt = 0;
- bulkptr = bulksize;
- if (pbuf < bulkptr) {
- desc_cnt++;
- } else {
- desc_cnt = 0;
- bulkptr = ((pbuf / bulksize) + 1) * bulksize; /* round to next bulksize */
- }
-
- /* dequeue same priority packet from station tx queue */
- psta = pfirstframe->attrib.psta;
- switch (pfirstframe->attrib.priority) {
- case 1:
- case 2:
- ptxservq = &psta->sta_xmitpriv.bk_q;
- phwxmit = pxmitpriv->hwxmits + 3;
- break;
- case 4:
- case 5:
- ptxservq = &psta->sta_xmitpriv.vi_q;
- phwxmit = pxmitpriv->hwxmits + 1;
- break;
- case 6:
- case 7:
- ptxservq = &psta->sta_xmitpriv.vo_q;
- phwxmit = pxmitpriv->hwxmits;
- break;
- case 0:
- case 3:
- default:
- ptxservq = &psta->sta_xmitpriv.be_q;
- phwxmit = pxmitpriv->hwxmits + 2;
- break;
- }
- spin_lock_bh(&pxmitpriv->lock);
-
- xmitframe_phead = &ptxservq->sta_pending;
- xmitframe_plist = xmitframe_phead->next;
-
- while (xmitframe_phead != xmitframe_plist) {
- pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
- xmitframe_plist = xmitframe_plist->next;
-
- pxmitframe->agg_num = 0; /* not first frame of aggregation */
- pxmitframe->pkt_offset = 0; /* not first frame of aggregation, no need to reserve offset */
-
- len = xmitframe_need_length(pxmitframe) + TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
-
- if (pbuf + len > MAX_XMITBUF_SZ) {
- pxmitframe->agg_num = 1;
- pxmitframe->pkt_offset = 1;
- break;
- }
- list_del_init(&pxmitframe->list);
- ptxservq->qcnt--;
- phwxmit->accnt--;
-
- pxmitframe->buf_addr = pxmitbuf->pbuf + pbuf;
-
- rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
- /* always return ndis_packet after rtw_xmitframe_coalesce */
- rtw_xmit_complete(adapt, pxmitframe);
-
- /* (len - TXDESC_SIZE) == pxmitframe->attrib.last_txcmdsz */
- update_txdesc(pxmitframe, pxmitframe->buf_addr, pxmitframe->attrib.last_txcmdsz);
-
- /* don't need xmitframe any more */
- rtw_free_xmitframe(pxmitpriv, pxmitframe);
-
- /* handle pointer and stop condition */
- pbuf_tail = pbuf + len;
- pbuf = round_up(pbuf_tail, 8);
-
- pfirstframe->agg_num++;
- if (pfirstframe->agg_num == MAX_TX_AGG_PACKET_NUMBER)
- break;
-
- if (pbuf < bulkptr) {
- desc_cnt++;
- if (desc_cnt == USB_TXAGG_DESC_NUM)
- break;
- } else {
- desc_cnt = 0;
- bulkptr = ((pbuf / bulksize) + 1) * bulksize;
- }
- } /* end while (aggregate same priority and same DA(AP or STA) frames) */
-
- if (list_empty(&ptxservq->sta_pending))
- list_del_init(&ptxservq->tx_pending);
-
- spin_unlock_bh(&pxmitpriv->lock);
- if ((pfirstframe->attrib.ether_type != 0x0806) &&
- (pfirstframe->attrib.ether_type != 0x888e) &&
- (pfirstframe->attrib.ether_type != 0x88b4) &&
- (pfirstframe->attrib.dhcp_pkt != 1))
- rtw_issue_addbareq_cmd(adapt, pfirstframe);
- /* 3 3. update first frame txdesc */
- if ((pbuf_tail % bulksize) == 0) {
- /* remove pkt_offset */
- pbuf_tail -= PACKET_OFFSET_SZ;
- pfirstframe->buf_addr += PACKET_OFFSET_SZ;
- pfirstframe->pkt_offset--;
- }
-
- update_txdesc(pfirstframe, pfirstframe->buf_addr, pfirstframe->attrib.last_txcmdsz);
-
- /* 3 4. write xmit buffer to USB FIFO */
- ff_hwaddr = rtw_get_ff_hwaddr(pfirstframe);
- rtw_write_port(adapt, ff_hwaddr, pbuf_tail, (u8 *)pxmitbuf);
-
- /* 3 5. update statistics */
- pbuf_tail -= (pfirstframe->agg_num * TXDESC_SIZE);
- pbuf_tail -= (pfirstframe->pkt_offset * PACKET_OFFSET_SZ);
-
- rtw_count_tx_stats(adapt, pfirstframe, pbuf_tail);
-
- rtw_free_xmitframe(pxmitpriv, pfirstframe);
-
- return true;
-}
-
-static s32 xmitframe_direct(struct adapter *adapt, struct xmit_frame *pxmitframe)
-{
- s32 res = _SUCCESS;
-
- res = rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
- if (res == _SUCCESS)
- rtw_dump_xframe(adapt, pxmitframe);
-
- return res;
-}
-
-/*
- * Return
- * true dump packet directly
- * false enqueue packet
- */
-static s32 pre_xmitframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
-{
- s32 res;
- struct xmit_buf *pxmitbuf = NULL;
- struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct mlme_priv *pmlmepriv = &adapt->mlmepriv;
-
- spin_lock_bh(&pxmitpriv->lock);
-
- if (rtw_txframes_sta_ac_pending(adapt, pattrib) > 0)
- goto enqueue;
-
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING))
- goto enqueue;
-
- pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
- if (!pxmitbuf)
- goto enqueue;
-
- spin_unlock_bh(&pxmitpriv->lock);
-
- pxmitframe->pxmitbuf = pxmitbuf;
- pxmitframe->buf_addr = pxmitbuf->pbuf;
- pxmitbuf->priv_data = pxmitframe;
-
- if (xmitframe_direct(adapt, pxmitframe) != _SUCCESS) {
- rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
- rtw_free_xmitframe(pxmitpriv, pxmitframe);
- }
-
- return true;
-
-enqueue:
- res = rtw_xmit_classifier(adapt, pxmitframe);
- spin_unlock_bh(&pxmitpriv->lock);
-
- if (res != _SUCCESS) {
- rtw_free_xmitframe(pxmitpriv, pxmitframe);
-
- /* Trick, make the statistics correct */
- pxmitpriv->tx_pkts--;
- pxmitpriv->tx_drop++;
- return true;
- }
-
- return false;
-}
-
-s32 rtl8188eu_mgnt_xmit(struct adapter *adapt, struct xmit_frame *pmgntframe)
-{
- return rtw_dump_xframe(adapt, pmgntframe);
-}
-
-/*
- * Return
- * true dump packet directly ok
- * false temporary can't transmit packets to hardware
- */
-s32 rtl8188eu_hal_xmit(struct adapter *adapt, struct xmit_frame *pxmitframe)
-{
- return pre_xmitframe(adapt, pxmitframe);
-}
diff --git a/drivers/staging/r8188eu/hal/usb_halinit.c b/drivers/staging/r8188eu/hal/usb_halinit.c
deleted file mode 100644
index a1051ac1cac4..000000000000
--- a/drivers/staging/r8188eu/hal/usb_halinit.c
+++ /dev/null
@@ -1,1069 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _HCI_HAL_INIT_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/rtw_efuse.h"
-#include "../include/rtw_fw.h"
-#include "../include/rtl8188e_hal.h"
-#include "../include/rtw_iol.h"
-#include "../include/usb_ops.h"
-#include "../include/usb_osintf.h"
-#include "../include/HalPwrSeqCmd.h"
-
-static void one_out_pipe(struct adapter *adapter)
-{
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
-
- pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
- pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
- pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[0];/* BE */
- pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[0];/* BK */
-}
-
-static void two_out_pipe(struct adapter *adapter, bool wifi_cfg)
-{
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
-
- /* 0:H, 1:L */
-
- pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
- pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[1];/* BE */
-
- if (wifi_cfg) {
- pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[1];/* VO */
- pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[0];/* BK */
- } else {
- pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
- pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[1];/* BK */
- }
-}
-
-static void three_out_pipe(struct adapter *adapter, bool wifi_cfg)
-{
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
-
- /* 0:H, 1:N, 2:L */
-
- pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
- pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[1];/* VI */
- pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[2];/* BE */
-
- pdvobjpriv->Queue2Pipe[3] = wifi_cfg ?
- pdvobjpriv->RtOutPipe[1] : pdvobjpriv->RtOutPipe[2];/* BK */
-}
-
-int rtl8188eu_interface_configure(struct adapter *adapt)
-{
- struct registry_priv *pregistrypriv = &adapt->registrypriv;
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapt);
- struct hal_data_8188e *haldata = &adapt->haldata;
- bool wifi_cfg = pregistrypriv->wifi_spec;
-
- pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
- pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
- pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
- pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
-
- switch (pdvobjpriv->RtNumOutPipes) {
- case 3:
- haldata->out_ep_extra_queues = TX_SELE_LQ | TX_SELE_NQ;
- three_out_pipe(adapt, wifi_cfg);
- break;
- case 2:
- haldata->out_ep_extra_queues = TX_SELE_NQ;
- two_out_pipe(adapt, wifi_cfg);
- break;
- case 1:
- one_out_pipe(adapt);
- break;
- default:
- return -ENXIO;
- }
-
- return 0;
-}
-
-u32 rtl8188eu_InitPowerOn(struct adapter *adapt)
-{
- u16 value16;
- int res;
-
- /* HW Power on sequence */
- struct hal_data_8188e *haldata = &adapt->haldata;
- if (haldata->bMacPwrCtrlOn)
- return _SUCCESS;
-
- if (!HalPwrSeqCmdParsing(adapt, PWR_ON_FLOW))
- return _FAIL;
-
- /* Enable MAC DMA/WMAC/SCHEDULE/SEC block */
- /* Set CR bit10 to enable 32k calibration. Suggested by SD1 Gimmy. Added by tynli. 2011.08.31. */
- rtw_write16(adapt, REG_CR, 0x00); /* suggested by zhouzhou, by page, 20111230 */
-
- /* Enable MAC DMA/WMAC/SCHEDULE/SEC block */
- res = rtw_read16(adapt, REG_CR, &value16);
- if (res)
- return _FAIL;
-
- value16 |= (HCI_TXDMA_EN | HCI_RXDMA_EN | TXDMA_EN | RXDMA_EN
- | PROTOCOL_EN | SCHEDULE_EN | ENSEC | CALTMR_EN);
- /* for SDIO - Set CR bit10 to enable 32k calibration. Suggested by SD1 Gimmy. Added by tynli. 2011.08.31. */
-
- rtw_write16(adapt, REG_CR, value16);
- haldata->bMacPwrCtrlOn = true;
-
- return _SUCCESS;
-}
-
-/* Shall USB interface init this? */
-static void _InitInterrupt(struct adapter *Adapter)
-{
- u32 imr, imr_ex;
- u8 usb_opt;
- int res;
-
- /* HISR write one to clear */
- rtw_write32(Adapter, REG_HISR_88E, 0xFFFFFFFF);
- /* HIMR - */
- imr = IMR_PSTIMEOUT_88E | IMR_TBDER_88E | IMR_CPWM_88E | IMR_CPWM2_88E;
- rtw_write32(Adapter, REG_HIMR_88E, imr);
-
- imr_ex = IMR_TXERR_88E | IMR_RXERR_88E | IMR_TXFOVW_88E | IMR_RXFOVW_88E;
- rtw_write32(Adapter, REG_HIMRE_88E, imr_ex);
-
- /* REG_USB_SPECIAL_OPTION - BIT(4) */
- /* 0; Use interrupt endpoint to upload interrupt pkt */
- /* 1; Use bulk endpoint to upload interrupt pkt, */
- res = rtw_read8(Adapter, REG_USB_SPECIAL_OPTION, &usb_opt);
- if (res)
- return;
-
- if (adapter_to_dvobj(Adapter)->pusbdev->speed == USB_SPEED_HIGH)
- usb_opt = usb_opt | (INT_BULK_SEL);
- else
- usb_opt = usb_opt & (~INT_BULK_SEL);
-
- rtw_write8(Adapter, REG_USB_SPECIAL_OPTION, usb_opt);
-}
-
-static void _InitQueueReservedPage(struct adapter *Adapter)
-{
- struct hal_data_8188e *haldata = &Adapter->haldata;
- struct registry_priv *pregistrypriv = &Adapter->registrypriv;
- u8 numLQ = 0;
- u8 numNQ = 0;
- u8 numPubQ;
-
- if (pregistrypriv->wifi_spec) {
- if (haldata->out_ep_extra_queues & TX_SELE_LQ)
- numLQ = 0x1C;
-
- /* NOTE: This step shall be done before writing REG_RQPN. */
- if (haldata->out_ep_extra_queues & TX_SELE_NQ)
- numNQ = 0x1C;
-
- rtw_write8(Adapter, REG_RQPN_NPQ, numNQ);
-
- numPubQ = 0xA8 - NUM_HQ - numLQ - numNQ;
-
- /* TX DMA */
- rtw_write32(Adapter, REG_RQPN, LD_RQPN | numPubQ << 16 | numLQ << 8 | NUM_HQ);
- } else {
- rtw_write16(Adapter, REG_RQPN_NPQ, 0x0000);/* Just follow MP Team,??? Georgia 03/28 */
- rtw_write16(Adapter, REG_RQPN_NPQ, 0x0d);
- rtw_write32(Adapter, REG_RQPN, 0x808E000d);/* reserve 7 page for LPS */
- }
-}
-
-static void _InitTxBufferBoundary(struct adapter *Adapter, u8 txpktbuf_bndy)
-{
- rtw_write8(Adapter, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
- rtw_write8(Adapter, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
- rtw_write8(Adapter, REG_TXPKTBUF_WMAC_LBK_BF_HD, txpktbuf_bndy);
- rtw_write8(Adapter, REG_TRXFF_BNDY, txpktbuf_bndy);
- rtw_write8(Adapter, REG_TDECTRL + 1, txpktbuf_bndy);
-}
-
-static void _InitPageBoundary(struct adapter *Adapter)
-{
- /* RX Page Boundary */
- /* */
- u16 rxff_bndy = MAX_RX_DMA_BUFFER_SIZE_88E - 1;
-
- rtw_write16(Adapter, (REG_TRXFF_BNDY + 2), rxff_bndy);
-}
-
-static void _InitNormalChipRegPriority(struct adapter *Adapter, u16 beQ,
- u16 bkQ, u16 viQ, u16 voQ, u16 mgtQ,
- u16 hiQ)
-{
- u16 value16;
- int res;
-
- res = rtw_read16(Adapter, REG_TRXDMA_CTRL, &value16);
- if (res)
- return;
-
- value16 &= 0x7;
-
- value16 |= _TXDMA_BEQ_MAP(beQ) | _TXDMA_BKQ_MAP(bkQ) |
- _TXDMA_VIQ_MAP(viQ) | _TXDMA_VOQ_MAP(voQ) |
- _TXDMA_MGQ_MAP(mgtQ) | _TXDMA_HIQ_MAP(hiQ);
-
- rtw_write16(Adapter, REG_TRXDMA_CTRL, value16);
-}
-
-static void _InitNormalChipTwoOutEpPriority(struct adapter *Adapter)
-{
- struct registry_priv *pregistrypriv = &Adapter->registrypriv;
- u16 bkQ, voQ;
-
- if (!pregistrypriv->wifi_spec) {
- bkQ = QUEUE_NORMAL;
- voQ = QUEUE_HIGH;
- } else {/* for WMM ,CONFIG_OUT_EP_WIFI_MODE */
- bkQ = QUEUE_HIGH;
- voQ = QUEUE_NORMAL;
- }
- _InitNormalChipRegPriority(Adapter, QUEUE_NORMAL, bkQ, QUEUE_HIGH,
- voQ, QUEUE_HIGH, QUEUE_HIGH);
-}
-
-static void _InitNormalChipThreeOutEpPriority(struct adapter *Adapter)
-{
- struct registry_priv *pregistrypriv = &Adapter->registrypriv;
- u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
-
- if (!pregistrypriv->wifi_spec) {/* typical setting */
- beQ = QUEUE_LOW;
- bkQ = QUEUE_LOW;
- viQ = QUEUE_NORMAL;
- voQ = QUEUE_HIGH;
- mgtQ = QUEUE_HIGH;
- hiQ = QUEUE_HIGH;
- } else {/* for WMM */
- beQ = QUEUE_LOW;
- bkQ = QUEUE_NORMAL;
- viQ = QUEUE_NORMAL;
- voQ = QUEUE_HIGH;
- mgtQ = QUEUE_HIGH;
- hiQ = QUEUE_HIGH;
- }
- _InitNormalChipRegPriority(Adapter, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
-}
-
-static void _InitQueuePriority(struct adapter *Adapter)
-{
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(Adapter);
-
- switch (pdvobjpriv->RtNumOutPipes) {
- case 1:
- _InitNormalChipRegPriority(Adapter, QUEUE_HIGH, QUEUE_HIGH, QUEUE_HIGH,
- QUEUE_HIGH, QUEUE_HIGH, QUEUE_HIGH);
- break;
- case 2:
- _InitNormalChipTwoOutEpPriority(Adapter);
- break;
- case 3:
- _InitNormalChipThreeOutEpPriority(Adapter);
- break;
- default:
- break;
- }
-}
-
-static void _InitNetworkType(struct adapter *Adapter)
-{
- u32 value32;
- int res;
-
- res = rtw_read32(Adapter, REG_CR, &value32);
- if (res)
- return;
-
- /* TODO: use the other function to set network type */
- value32 = (value32 & ~MASK_NETTYPE) | _NETTYPE(NT_LINK_AP);
-
- rtw_write32(Adapter, REG_CR, value32);
-}
-
-static void _InitTransferPageSize(struct adapter *Adapter)
-{
- /* Tx page size is always 128. */
-
- u8 value8;
- value8 = _PSRX(PBP_128) | _PSTX(PBP_128);
- rtw_write8(Adapter, REG_PBP, value8);
-}
-
-static void _InitDriverInfoSize(struct adapter *Adapter, u8 drvInfoSize)
-{
- rtw_write8(Adapter, REG_RX_DRVINFO_SZ, drvInfoSize);
-}
-
-static void _InitWMACSetting(struct adapter *Adapter)
-{
- u32 receive_config = RCR_AAP | RCR_APM | RCR_AM | RCR_AB |
- RCR_CBSSID_DATA | RCR_CBSSID_BCN |
- RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL |
- RCR_APP_MIC | RCR_APP_PHYSTS;
-
- /* some REG_RCR will be modified later by phy_ConfigMACWithHeaderFile() */
- rtw_write32(Adapter, REG_RCR, receive_config);
-
- /* Accept all multicast address */
- rtw_write32(Adapter, REG_MAR, 0xFFFFFFFF);
- rtw_write32(Adapter, REG_MAR + 4, 0xFFFFFFFF);
-}
-
-static void _InitAdaptiveCtrl(struct adapter *Adapter)
-{
- u16 value16;
- u32 value32;
- int res;
-
- /* Response Rate Set */
- res = rtw_read32(Adapter, REG_RRSR, &value32);
- if (res)
- return;
-
- value32 &= ~RATE_BITMAP_ALL;
- value32 |= RATE_RRSR_CCK_ONLY_1M;
- rtw_write32(Adapter, REG_RRSR, value32);
-
- /* CF-END Threshold */
-
- /* SIFS (used in NAV) */
- value16 = _SPEC_SIFS_CCK(0x10) | _SPEC_SIFS_OFDM(0x10);
- rtw_write16(Adapter, REG_SPEC_SIFS, value16);
-
- /* Retry Limit */
- value16 = _LRL(0x30) | _SRL(0x30);
- rtw_write16(Adapter, REG_RL, value16);
-}
-
-static void _InitEDCA(struct adapter *Adapter)
-{
- /* Set Spec SIFS (used in NAV) */
- rtw_write16(Adapter, REG_SPEC_SIFS, 0x100a);
- rtw_write16(Adapter, REG_MAC_SPEC_SIFS, 0x100a);
-
- /* Set SIFS for CCK */
- rtw_write16(Adapter, REG_SIFS_CTX, 0x100a);
-
- /* Set SIFS for OFDM */
- rtw_write16(Adapter, REG_SIFS_TRX, 0x100a);
-
- /* TXOP */
- rtw_write32(Adapter, REG_EDCA_BE_PARAM, 0x005EA42B);
- rtw_write32(Adapter, REG_EDCA_BK_PARAM, 0x0000A44F);
- rtw_write32(Adapter, REG_EDCA_VI_PARAM, 0x005EA324);
- rtw_write32(Adapter, REG_EDCA_VO_PARAM, 0x002FA226);
-}
-
-static void _InitRetryFunction(struct adapter *Adapter)
-{
- u8 value8;
- int res;
-
- res = rtw_read8(Adapter, REG_FWHW_TXQ_CTRL, &value8);
- if (res)
- return;
-
- value8 |= EN_AMPDU_RTY_NEW;
- rtw_write8(Adapter, REG_FWHW_TXQ_CTRL, value8);
-
- /* Set ACK timeout */
- rtw_write8(Adapter, REG_ACKTO, 0x40);
-}
-
-/*-----------------------------------------------------------------------------
- * Function: usb_AggSettingTxUpdate()
- *
- * Overview: Separate TX/RX parameter updates independently for TP detection and
- * dynamic TX/RX aggregation parameter updates.
- *
- * Input: struct adapter *
- *
- * Output/Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 12/10/2010 MHC Separate to smaller function.
- *
- *---------------------------------------------------------------------------*/
-static void usb_AggSettingTxUpdate(struct adapter *Adapter)
-{
- u32 value32;
- int res;
-
- if (Adapter->registrypriv.wifi_spec)
- return;
-
- res = rtw_read32(Adapter, REG_TDECTRL, &value32);
- if (res)
- return;
-
- value32 = value32 & ~(BLK_DESC_NUM_MASK << BLK_DESC_NUM_SHIFT);
- value32 |= ((USB_TXAGG_DESC_NUM & BLK_DESC_NUM_MASK) << BLK_DESC_NUM_SHIFT);
-
- rtw_write32(Adapter, REG_TDECTRL, value32);
-}
-
-/*-----------------------------------------------------------------------------
- * Function: usb_AggSettingRxUpdate()
- *
- * Overview: Separate TX/RX parameter updates independently for TP detection and
- * dynamic TX/RX aggregation parameter updates.
- *
- * Input: struct adapter *
- *
- * Output/Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 12/10/2010 MHC Separate to smaller function.
- *
- *---------------------------------------------------------------------------*/
-static void
-usb_AggSettingRxUpdate(
- struct adapter *Adapter
- )
-{
- u8 valueDMA;
- u8 valueUSB;
- int res;
-
- res = rtw_read8(Adapter, REG_TRXDMA_CTRL, &valueDMA);
- if (res)
- return;
-
- res = rtw_read8(Adapter, REG_USB_SPECIAL_OPTION, &valueUSB);
- if (res)
- return;
-
- valueDMA |= RXDMA_AGG_EN;
- valueUSB &= ~USB_AGG_EN;
-
- rtw_write8(Adapter, REG_TRXDMA_CTRL, valueDMA);
- rtw_write8(Adapter, REG_USB_SPECIAL_OPTION, valueUSB);
-
- rtw_write8(Adapter, REG_RXDMA_AGG_PG_TH, USB_RXAGG_PAGE_COUNT);
- rtw_write8(Adapter, REG_RXDMA_AGG_PG_TH + 1, USB_RXAGG_PAGE_TIMEOUT);
-}
-
-static void InitUsbAggregationSetting(struct adapter *Adapter)
-{
- /* Tx aggregation setting */
- usb_AggSettingTxUpdate(Adapter);
-
- /* Rx aggregation setting */
- usb_AggSettingRxUpdate(Adapter);
-}
-
-/* FIXME: add error handling in callers */
-static int _InitBeaconParameters(struct adapter *Adapter)
-{
- struct hal_data_8188e *haldata = &Adapter->haldata;
- int res;
-
- rtw_write16(Adapter, REG_BCN_CTRL, 0x1010);
-
- /* TODO: Remove these magic numbers */
- rtw_write16(Adapter, REG_TBTT_PROHIBIT, 0x6404);/* ms */
- rtw_write8(Adapter, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME);/* 5ms */
- rtw_write8(Adapter, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME); /* 2ms */
-
- /* Suggested by designer timchen. Change beacon AIFS to the largest number */
- /* because the test chip does not perform contention before sending a beacon. by tynli. 2009.11.03 */
- rtw_write16(Adapter, REG_BCNTCFG, 0x660F);
-
- res = rtw_read8(Adapter, REG_FWHW_TXQ_CTRL + 2, &haldata->RegFwHwTxQCtrl);
- if (res)
- return res;
-
- res = rtw_read8(Adapter, REG_TBTT_PROHIBIT + 2, &haldata->RegReg542);
- if (res)
- return res;
-
- res = rtw_read8(Adapter, REG_CR + 1, &haldata->RegCR_1);
- if (res)
- return res;
-
- return 0;
-}
-
-static void _BeaconFunctionEnable(struct adapter *Adapter)
-{
- rtw_write8(Adapter, REG_BCN_CTRL, (BIT(4) | BIT(3) | BIT(1)));
-
- rtw_write8(Adapter, REG_RD_CTRL + 1, 0x6F);
-}
-
-/* Set CCK and OFDM Block "ON" */
-static void _BBTurnOnBlock(struct adapter *Adapter)
-{
- rtl8188e_PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bCCKEn, 0x1);
- rtl8188e_PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bOFDMEn, 0x1);
-}
-
-static void _InitAntenna_Selection(struct adapter *Adapter)
-{
- struct hal_data_8188e *haldata = &Adapter->haldata;
- int res;
- u32 reg;
-
- if (haldata->AntDivCfg == 0)
- return;
-
- res = rtw_read32(Adapter, REG_LEDCFG0, &reg);
- if (res)
- return;
-
- rtw_write32(Adapter, REG_LEDCFG0, reg | BIT(23));
- rtl8188e_PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter, BIT(13), 0x01);
-
- if (rtl8188e_PHY_QueryBBReg(Adapter, rFPGA0_XA_RFInterfaceOE, 0x300) == Antenna_A)
- haldata->CurAntenna = Antenna_A;
- else
- haldata->CurAntenna = Antenna_B;
-}
-
-static void hw_var_set_macaddr(struct adapter *Adapter, u8 *val)
-{
- u8 idx = 0;
- u32 reg_macid;
-
- reg_macid = REG_MACID;
-
- for (idx = 0; idx < 6; idx++)
- rtw_write8(Adapter, (reg_macid + idx), val[idx]);
-}
-
-u32 rtl8188eu_hal_init(struct adapter *Adapter)
-{
- u8 value8 = 0;
- u16 value16;
- u32 status = _SUCCESS;
- int res;
- struct hal_data_8188e *haldata = &Adapter->haldata;
- struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
- struct registry_priv *pregistrypriv = &Adapter->registrypriv;
- u32 reg;
-
- if (Adapter->pwrctrlpriv.bkeepfwalive) {
- if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
- PHY_IQCalibrate_8188E(Adapter, true);
- } else {
- PHY_IQCalibrate_8188E(Adapter, false);
- haldata->odmpriv.RFCalibrateInfo.bIQKInitialized = true;
- }
-
- ODM_TXPowerTrackingCheck(&haldata->odmpriv);
- PHY_LCCalibrate_8188E(Adapter);
-
- goto exit;
- }
-
- status = rtl8188eu_InitPowerOn(Adapter);
- if (status == _FAIL)
- goto exit;
-
- /* Save target channel */
- haldata->CurrentChannel = 6;/* default set to 6 */
-
- /* 2010/08/09 MH We need to check if we need to turnon or off RF after detecting */
- /* HW GPIO pin. Before PHY_RFConfig8192C. */
- /* 2010/08/26 MH If Efuse does not support selective suspend then disable the function. */
-
- _InitQueueReservedPage(Adapter);
- _InitQueuePriority(Adapter);
- _InitPageBoundary(Adapter);
- _InitTransferPageSize(Adapter);
-
- _InitTxBufferBoundary(Adapter, 0);
-
- status = rtl8188e_firmware_download(Adapter);
-
- if (status != _SUCCESS) {
- Adapter->bFWReady = false;
- haldata->fw_ractrl = false;
- return status;
- } else {
- Adapter->bFWReady = true;
- haldata->fw_ractrl = false;
- }
- /* Initialize firmware vars */
- Adapter->pwrctrlpriv.bFwCurrentInPSMode = false;
- haldata->LastHMEBoxNum = 0;
-
- if (PHY_MACConfig8188E(Adapter))
- return _FAIL;
-
- /* */
- /* d. Initialize BB related configurations. */
- /* */
- if (PHY_BBConfig8188E(Adapter))
- return _FAIL;
-
- if (phy_RF6052_Config_ParaFile(Adapter))
- return _FAIL;
-
- status = rtl8188e_iol_efuse_patch(Adapter);
- if (status == _FAIL)
- goto exit;
-
- _InitTxBufferBoundary(Adapter, TX_PAGE_BOUNDARY_88E);
-
- status = InitLLTTable(Adapter, TX_PAGE_BOUNDARY_88E);
- if (status == _FAIL)
- goto exit;
-
- /* Get Rx PHY status in order to report RSSI and others. */
- _InitDriverInfoSize(Adapter, DRVINFO_SZ);
-
- _InitInterrupt(Adapter);
- hw_var_set_macaddr(Adapter, Adapter->eeprompriv.mac_addr);
- _InitNetworkType(Adapter);/* set msr */
- _InitWMACSetting(Adapter);
- _InitAdaptiveCtrl(Adapter);
- _InitEDCA(Adapter);
- _InitRetryFunction(Adapter);
- InitUsbAggregationSetting(Adapter);
- _InitBeaconParameters(Adapter);
-
- /* */
- /* Init CR MACTXEN, MACRXEN after setting RxFF boundary REG_TRXFF_BNDY to patch */
- /* Hw bug where Hw initializes the RxFF boundary size to a value larger than the real Rx buffer size in 88E. */
- /* */
- /* Enable MACTXEN/MACRXEN block */
- res = rtw_read16(Adapter, REG_CR, &value16);
- if (res)
- return _FAIL;
-
- value16 |= (MACTXEN | MACRXEN);
- rtw_write8(Adapter, REG_CR, value16);
-
- /* Enable TX Report */
- /* Enable Tx Report Timer */
- res = rtw_read8(Adapter, REG_TX_RPT_CTRL, &value8);
- if (res)
- return _FAIL;
-
- rtw_write8(Adapter, REG_TX_RPT_CTRL, (value8 | BIT(1) | BIT(0)));
- /* Set MAX RPT MACID */
- rtw_write8(Adapter, REG_TX_RPT_CTRL + 1, 2);/* FOR sta mode ,0: bc/mc ,1:AP */
- /* Tx RPT Timer. Unit: 32us */
- rtw_write16(Adapter, REG_TX_RPT_TIME, 0xCdf0);
-
- rtw_write8(Adapter, REG_EARLY_MODE_CONTROL, 0);
-
- rtw_write16(Adapter, REG_PKT_VO_VI_LIFE_TIME, 0x0400); /* unit: 256us. 256ms */
- rtw_write16(Adapter, REG_PKT_BE_BK_LIFE_TIME, 0x0400); /* unit: 256us. 256ms */
-
- /* Keep RfRegChnlVal for later use. */
- haldata->RfRegChnlVal = rtl8188e_PHY_QueryRFReg(Adapter, RF_CHNLBW, bRFRegOffsetMask);
-
- _BBTurnOnBlock(Adapter);
-
- invalidate_cam_all(Adapter);
-
- /* 2010/12/17 MH We need to set TX power according to EFUSE content at first. */
- PHY_SetTxPowerLevel8188E(Adapter, haldata->CurrentChannel);
-
-/* Move by Neo for USB SS to below step */
-/* _RfPowerSave(Adapter); */
-
- _InitAntenna_Selection(Adapter);
-
- /* */
- /* Disable BAR, suggested by Scott */
- /* 2010.04.09 add by hpfan */
- /* */
- rtw_write32(Adapter, REG_BAR_MODE_CTRL, 0x0201ffff);
-
- /* HW SEQ CTRL */
- /* set 0x0 to 0xFF by tynli. Default enable HW SEQ NUM. */
- rtw_write8(Adapter, REG_HWSEQ_CTRL, 0xFF);
-
- if (pregistrypriv->wifi_spec)
- rtw_write16(Adapter, REG_FAST_EDCA_CTRL, 0);
-
- /* Nav limit, suggested by scott */
- rtw_write8(Adapter, 0x652, 0x0);
-
- rtl8188e_InitHalDm(Adapter);
-
- /* 2010/08/11 MH Merge from 8192SE for Minicard init. We need to confirm current radio status */
- /* and then decide to enable RF or not.!!!??? For Selective suspend mode. We may not */
- /* call initstruct adapter. May cause some problem?? */
- /* Fix the bug that Hw/Sw radio off before S3/S4, the RF off action will not be executed */
- /* in MgntActSet_RF_State() after wake up, because the value of haldata->eRFPowerState */
- /* is the same as eRfOff, we should change it to eRfOn after we config RF parameters. */
- /* Added by tynli. 2010.03.30. */
- pwrctrlpriv->rf_pwrstate = rf_on;
-
- /* enable Tx report. */
- rtw_write8(Adapter, REG_FWHW_TXQ_CTRL + 1, 0x0F);
-
- /* Suggested by SD1 pisa. Added by tynli. 2011.10.21. */
- rtw_write8(Adapter, REG_EARLY_MODE_CONTROL + 3, 0x01);/* Pretx_en, for WEP/TKIP SEC */
-
- /* tynli_test_tx_report. */
- rtw_write16(Adapter, REG_TX_RPT_TIME, 0x3DF0);
-
- /* enable tx DMA to drop the redundant data of packets */
- res = rtw_read16(Adapter, REG_TXDMA_OFFSET_CHK, &value16);
- if (res)
- return _FAIL;
-
- rtw_write16(Adapter, REG_TXDMA_OFFSET_CHK, (value16 | DROP_DATA_EN));
-
- /* 2010/08/26 MH Merge from 8192CE. */
- if (pwrctrlpriv->rf_pwrstate == rf_on) {
- if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
- PHY_IQCalibrate_8188E(Adapter, true);
- } else {
- PHY_IQCalibrate_8188E(Adapter, false);
- haldata->odmpriv.RFCalibrateInfo.bIQKInitialized = true;
- }
-
- ODM_TXPowerTrackingCheck(&haldata->odmpriv);
-
- PHY_LCCalibrate_8188E(Adapter);
- }
-
-/* _InitPABias(Adapter); */
- rtw_write8(Adapter, REG_USB_HRPWM, 0);
-
- /* ack for xmit mgmt frames. */
- res = rtw_read32(Adapter, REG_FWHW_TXQ_CTRL, &reg);
- if (res)
- return _FAIL;
-
- rtw_write32(Adapter, REG_FWHW_TXQ_CTRL, reg | BIT(12));
-
-exit:
- return status;
-}
-
-static void CardDisableRTL8188EU(struct adapter *Adapter)
-{
- u8 val8;
- struct hal_data_8188e *haldata = &Adapter->haldata;
- int res;
-
- /* Stop Tx Report Timer. 0x4EC[Bit1]=b'0 */
- res = rtw_read8(Adapter, REG_TX_RPT_CTRL, &val8);
- if (res)
- return;
-
- rtw_write8(Adapter, REG_TX_RPT_CTRL, val8 & (~BIT(1)));
-
- /* stop rx */
- rtw_write8(Adapter, REG_CR, 0x0);
-
- /* Run LPS WL RFOFF flow */
- HalPwrSeqCmdParsing(Adapter, LPS_ENTER_FLOW);
-
- /* 2. 0x1F[7:0] = 0 turn off RF */
-
- res = rtw_read8(Adapter, REG_MCUFWDL, &val8);
- if (res)
- return;
-
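-	/* If 8051 firmware was downloaded to RAM and is running, hold the
-	 * MCU in reset (clear FEN_CPUEN) before running the disable flow.
-	 */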
- if ((val8 & RAM_DL_SEL) && Adapter->bFWReady) { /* 8051 RAM code */
- /* Reset MCU 0x2[10]=0. */
- res = rtw_read8(Adapter, REG_SYS_FUNC_EN + 1, &val8);
- if (res)
- return;
-
- val8 &= ~BIT(2); /* 0x2[10], FEN_CPUEN */
- rtw_write8(Adapter, REG_SYS_FUNC_EN + 1, val8);
- }
-
- /* reset MCU ready status */
- rtw_write8(Adapter, REG_MCUFWDL, 0);
-
- /* YJ,add,111212 */
- /* Disable 32k */
- res = rtw_read8(Adapter, REG_32K_CTRL, &val8);
- if (res)
- return;
-
- rtw_write8(Adapter, REG_32K_CTRL, val8 & (~BIT(0)));
-
- /* Card disable power action flow */
- HalPwrSeqCmdParsing(Adapter, DISABLE_FLOW);
-
- /* Reset MCU IO Wrapper */
- res = rtw_read8(Adapter, REG_RSV_CTRL + 1, &val8);
- if (res)
- return;
-
- rtw_write8(Adapter, REG_RSV_CTRL + 1, (val8 & (~BIT(3))));
-
- res = rtw_read8(Adapter, REG_RSV_CTRL + 1, &val8);
- if (res)
- return;
-
- rtw_write8(Adapter, REG_RSV_CTRL + 1, val8 | BIT(3));
-
- /* YJ,test add, 111207. For Power Consumption. */
- res = rtw_read8(Adapter, GPIO_IN, &val8);
- if (res)
- return;
-
- rtw_write8(Adapter, GPIO_OUT, val8);
- rtw_write8(Adapter, GPIO_IO_SEL, 0xFF);/* Reg0x46 */
-
- res = rtw_read8(Adapter, REG_GPIO_IO_SEL, &val8);
- if (res)
- return;
-
- rtw_write8(Adapter, REG_GPIO_IO_SEL, (val8 << 4));
- res = rtw_read8(Adapter, REG_GPIO_IO_SEL + 1, &val8);
- if (res)
- return;
-
- rtw_write8(Adapter, REG_GPIO_IO_SEL + 1, val8 | 0x0F);/* Reg0x43 */
-	rtw_write32(Adapter, REG_BB_PAD_CTRL, 0x00080808); /* set LNA, TRSW, EX_PA pins to output mode */
- haldata->bMacPwrCtrlOn = false;
- Adapter->bFWReady = false;
-}
-
-u32 rtl8188eu_hal_deinit(struct adapter *Adapter)
-{
- rtw_write32(Adapter, REG_HIMR_88E, IMR_DISABLED_88E);
- rtw_write32(Adapter, REG_HIMRE_88E, IMR_DISABLED_88E);
-
- if (!Adapter->pwrctrlpriv.bkeepfwalive) {
- if (Adapter->hw_init_completed) {
- CardDisableRTL8188EU(Adapter);
- }
- }
- return _SUCCESS;
-}
-
-int rtl8188eu_inirp_init(struct adapter *Adapter)
-{
- u8 i;
- struct recv_buf *precvbuf;
- struct recv_priv *precvpriv = &Adapter->recvpriv;
- int ret;
-
- /* issue Rx irp to receive data */
- precvbuf = (struct recv_buf *)precvpriv->precv_buf;
- for (i = 0; i < NR_RECVBUFF; i++) {
- ret = rtw_read_port(Adapter, precvbuf);
- if (ret)
- return ret;
-
- precvbuf++;
- precvpriv->free_recv_buf_queue_cnt--;
- }
-
- return 0;
-}
-
-/* */
-/* */
-/* EEPROM/EFUSE Content Parsing */
-/* */
-/* */
-
-static void Hal_EfuseParseMACAddr_8188EU(struct adapter *adapt, u8 *hwinfo, bool AutoLoadFail)
-{
- struct eeprom_priv *eeprom = &adapt->eeprompriv;
-
- if (AutoLoadFail) {
- eth_random_addr(eeprom->mac_addr);
- } else {
- /* Read Permanent MAC address */
- memcpy(eeprom->mac_addr, &hwinfo[EEPROM_MAC_ADDR_88EU], ETH_ALEN);
- }
-}
-
-int ReadAdapterInfo8188EU(struct adapter *Adapter)
-{
- struct eeprom_priv *eeprom = &Adapter->eeprompriv;
- struct led_priv *ledpriv = &Adapter->ledpriv;
- u8 *efuse_buf;
- u8 eeValue;
- int res;
-
- /* check system boot selection */
- res = rtw_read8(Adapter, REG_9346CR, &eeValue);
- if (res)
- return res;
-
- eeprom->bautoload_fail_flag = !(eeValue & EEPROM_EN);
-
- efuse_buf = kmalloc(EFUSE_MAP_LEN_88E, GFP_KERNEL);
- if (!efuse_buf)
- return -ENOMEM;
- memset(efuse_buf, 0xFF, EFUSE_MAP_LEN_88E);
-
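-	/* Read the physical efuse map only when booting from efuse and
-	 * autoload succeeded; otherwise the all-0xFF map selects defaults.
-	 */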
- if (!(eeValue & BOOT_FROM_EEPROM) && !eeprom->bautoload_fail_flag) {
- rtl8188e_EfusePowerSwitch(Adapter, true);
- rtl8188e_ReadEFuse(Adapter, EFUSE_MAP_LEN_88E, efuse_buf);
- rtl8188e_EfusePowerSwitch(Adapter, false);
- }
-
- /* parse the eeprom/efuse content */
- Hal_EfuseParseIDCode88E(Adapter, efuse_buf);
- Hal_EfuseParseMACAddr_8188EU(Adapter, efuse_buf, eeprom->bautoload_fail_flag);
-
- Hal_ReadPowerSavingMode88E(Adapter, efuse_buf, eeprom->bautoload_fail_flag);
- Hal_ReadTxPowerInfo88E(Adapter, efuse_buf, eeprom->bautoload_fail_flag);
- rtl8188e_EfuseParseChnlPlan(Adapter, efuse_buf, eeprom->bautoload_fail_flag);
- Hal_EfuseParseXtal_8188E(Adapter, efuse_buf, eeprom->bautoload_fail_flag);
- Hal_ReadAntennaDiversity88E(Adapter, efuse_buf, eeprom->bautoload_fail_flag);
- Hal_ReadThermalMeter_88E(Adapter, efuse_buf, eeprom->bautoload_fail_flag);
-
- ledpriv->bRegUseLed = true;
- kfree(efuse_buf);
- return 0;
-}
-
-void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
-{
- u8 init_rate = 0;
- u8 networkType, raid;
- u32 mask, rate_bitmap;
- u8 shortGIrate = false;
- int supportRateNum = 0;
- struct sta_info *psta;
- struct hal_data_8188e *haldata = &adapt->haldata;
- struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
-
- if (mac_id >= NUM_STA) /* CAM_SIZE */
- return;
- psta = pmlmeinfo->FW_sta_info[mac_id].psta;
- if (!psta)
- return;
- switch (mac_id) {
- case 0:/* for infra mode */
- supportRateNum = rtw_get_rateset_len(cur_network->SupportedRates);
- networkType = judge_network_type(adapt, cur_network->SupportedRates, supportRateNum) & 0xf;
- raid = networktype_to_raid(networkType);
- mask = update_supported_rate(cur_network->SupportedRates, supportRateNum);
- mask |= (pmlmeinfo->HT_enable) ? update_MSC_rate(&pmlmeinfo->HT_caps) : 0;
- if (support_short_GI(adapt, &pmlmeinfo->HT_caps))
- shortGIrate = true;
- break;
- case 1:/* for broadcast/multicast */
- supportRateNum = rtw_get_rateset_len(pmlmeinfo->FW_sta_info[mac_id].SupportedRates);
- if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
- networkType = WIRELESS_11B;
- else
- networkType = WIRELESS_11G;
- raid = networktype_to_raid(networkType);
- mask = update_basic_rate(cur_network->SupportedRates, supportRateNum);
- break;
- default: /* for each sta in IBSS */
- supportRateNum = rtw_get_rateset_len(pmlmeinfo->FW_sta_info[mac_id].SupportedRates);
- networkType = judge_network_type(adapt, pmlmeinfo->FW_sta_info[mac_id].SupportedRates, supportRateNum) & 0xf;
- raid = networktype_to_raid(networkType);
- mask = update_supported_rate(cur_network->SupportedRates, supportRateNum);
-
- /* todo: support HT in IBSS */
- break;
- }
-
-	rate_bitmap = ODM_Get_Rate_Bitmap(&haldata->odmpriv, mac_id, mask, rssi_level);
-
- mask &= rate_bitmap;
-
- init_rate = get_highest_rate_idx(mask) & 0x3f;
-
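-	/* Hand the rate mask to the firmware rate-adaptation engine when it
-	 * is available; otherwise let the driver-side ODM RA code track it.
-	 */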
-	if (haldata->fw_ractrl) {
-		mask |= ((raid << 28) & 0xf0000000);
-		psta->ra_mask = mask;
-
- /* to do ,for 8188E-SMIC */
- rtl8188e_set_raid_cmd(adapt, mask);
- } else {
- ODM_RA_UpdateRateInfo_8188E(&haldata->odmpriv,
- mac_id,
- raid,
- mask,
- shortGIrate
- );
- }
- /* set ra_id */
- psta->raid = raid;
- psta->init_rate = init_rate;
-}
-
-void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt)
-{
- u32 value32;
- struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- u32 bcn_ctrl_reg = REG_BCN_CTRL;
- int res;
- u8 reg;
- /* reset TSF, enable update TSF, correcting TSF On Beacon */
-
- /* BCN interval */
- rtw_write16(adapt, REG_BCN_INTERVAL, pmlmeinfo->bcn_interval);
- rtw_write8(adapt, REG_ATIMWND, 0x02);/* 2ms */
-
- _InitBeaconParameters(adapt);
-
- rtw_write8(adapt, REG_SLOT, 0x09);
-
- res = rtw_read32(adapt, REG_TCR, &value32);
- if (res)
- return;
-
- value32 &= ~TSFRST;
- rtw_write32(adapt, REG_TCR, value32);
-
- value32 |= TSFRST;
- rtw_write32(adapt, REG_TCR, value32);
-
-	/* NOTE: Fix test chip's bug (about contention window's randomness) */
- rtw_write8(adapt, REG_RXTSF_OFFSET_CCK, 0x50);
- rtw_write8(adapt, REG_RXTSF_OFFSET_OFDM, 0x50);
-
- _BeaconFunctionEnable(adapt);
-
- rtw_resume_tx_beacon(adapt);
-
- res = rtw_read8(adapt, bcn_ctrl_reg, &reg);
- if (res)
- return;
-
- rtw_write8(adapt, bcn_ctrl_reg, reg | BIT(1));
-}
-
-void rtl8188eu_init_default_value(struct adapter *adapt)
-{
- struct hal_data_8188e *haldata = &adapt->haldata;
- struct pwrctrl_priv *pwrctrlpriv;
- u8 i;
-
- pwrctrlpriv = &adapt->pwrctrlpriv;
-
- /* init default value */
- haldata->fw_ractrl = false;
- if (!pwrctrlpriv->bkeepfwalive)
- haldata->LastHMEBoxNum = 0;
-
- /* init dm default value */
- haldata->odmpriv.RFCalibrateInfo.bIQKInitialized = false;
- haldata->odmpriv.RFCalibrateInfo.TM_Trigger = 0;/* for IQK */
- haldata->pwrGroupCnt = 0;
- haldata->odmpriv.RFCalibrateInfo.ThermalValue_HP_index = 0;
- for (i = 0; i < HP_THERMAL_NUM; i++)
- haldata->odmpriv.RFCalibrateInfo.ThermalValue_HP[i] = 0;
-}
diff --git a/drivers/staging/r8188eu/hal/usb_ops_linux.c b/drivers/staging/r8188eu/hal/usb_ops_linux.c
deleted file mode 100644
index 9611b19ab55b..000000000000
--- a/drivers/staging/r8188eu/hal/usb_ops_linux.c
+++ /dev/null
@@ -1,476 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/osdep_intf.h"
-#include "../include/usb_ops.h"
-#include "../include/rtl8188e_hal.h"
-
-#define VENDOR_CMD_MAX_DATA_LEN 254
-
-#define RTW_USB_CONTROL_MSG_TIMEOUT 500/* ms */
-
-static int usb_read(struct adapter *adapt, u16 value, void *data, u8 size)
-{
- struct dvobj_priv *dvobjpriv = adapter_to_dvobj(adapt);
- struct usb_device *udev = dvobjpriv->pusbdev;
- int status;
- u8 io_buf[4];
-
- if (adapt->bSurpriseRemoved)
- return -EPERM;
-
- status = usb_control_msg_recv(udev, 0, REALTEK_USB_VENQT_CMD_REQ,
- REALTEK_USB_VENQT_READ, value,
- REALTEK_USB_VENQT_CMD_IDX, io_buf,
- size, RTW_USB_CONTROL_MSG_TIMEOUT,
- GFP_KERNEL);
-
- if (status == -ESHUTDOWN ||
- status == -ENODEV ||
- status == -ENOENT) {
- /*
- * device or controller has been disabled due to
- * some problem that could not be worked around,
- * device or bus doesn’t exist, endpoint does not
- * exist or is not enabled.
- */
- adapt->bSurpriseRemoved = true;
- return status;
- }
-
- if (status < 0) {
- if (rtw_inc_and_chk_continual_urb_error(dvobjpriv))
- adapt->bSurpriseRemoved = true;
-
- return status;
- }
-
- rtw_reset_continual_urb_error(dvobjpriv);
- memcpy(data, io_buf, size);
-
- return status;
-}
-
-static int usb_write(struct adapter *adapt, u16 value, void *data, u8 size)
-{
- struct dvobj_priv *dvobjpriv = adapter_to_dvobj(adapt);
- struct usb_device *udev = dvobjpriv->pusbdev;
- int status;
- u8 io_buf[VENDOR_CMD_MAX_DATA_LEN];
-
- if (adapt->bSurpriseRemoved)
- return -EPERM;
-
- memcpy(io_buf, data, size);
- status = usb_control_msg_send(udev, 0, REALTEK_USB_VENQT_CMD_REQ,
- REALTEK_USB_VENQT_WRITE, value,
- REALTEK_USB_VENQT_CMD_IDX, io_buf,
- size, RTW_USB_CONTROL_MSG_TIMEOUT,
- GFP_KERNEL);
-
- if (status == -ESHUTDOWN ||
- status == -ENODEV ||
- status == -ENOENT) {
- /*
- * device or controller has been disabled due to
- * some problem that could not be worked around,
- * device or bus doesn’t exist, endpoint does not
- * exist or is not enabled.
- */
- adapt->bSurpriseRemoved = true;
- return status;
- }
-
- if (status < 0) {
- if (rtw_inc_and_chk_continual_urb_error(dvobjpriv))
- adapt->bSurpriseRemoved = true;
-
- return status;
- }
-
- rtw_reset_continual_urb_error(dvobjpriv);
-
- return status;
-}
-
-int __must_check rtw_read8(struct adapter *adapter, u32 addr, u8 *data)
-{
- u16 value = addr & 0xffff;
-
- return usb_read(adapter, value, data, 1);
-}
-
-int __must_check rtw_read16(struct adapter *adapter, u32 addr, u16 *data)
-{
- u16 value = addr & 0xffff;
- __le16 le_data;
- int res;
-
- res = usb_read(adapter, value, &le_data, 2);
- if (res)
- return res;
-
- *data = le16_to_cpu(le_data);
-
- return 0;
-}
-
-int __must_check rtw_read32(struct adapter *adapter, u32 addr, u32 *data)
-{
- u16 value = addr & 0xffff;
- __le32 le_data;
- int res;
-
- res = usb_read(adapter, value, &le_data, 4);
- if (res)
- return res;
-
- *data = le32_to_cpu(le_data);
-
- return 0;
-}
-
-int rtw_write8(struct adapter *adapter, u32 addr, u8 val)
-{
- u16 value = addr & 0xffff;
- int ret;
-
- ret = usb_write(adapter, value, &val, 1);
-
- return RTW_STATUS_CODE(ret);
-}
-
-int rtw_write16(struct adapter *adapter, u32 addr, u16 val)
-{
- u16 value = addr & 0xffff;
- __le16 data = cpu_to_le16(val);
- int ret;
-
- ret = usb_write(adapter, value, &data, 2);
-
- return RTW_STATUS_CODE(ret);
-}
-
-int rtw_write32(struct adapter *adapter, u32 addr, u32 val)
-{
- u16 value = addr & 0xffff;
- __le32 data = cpu_to_le32(val);
- int ret;
-
- ret = usb_write(adapter, value, &data, 4);
-
- return RTW_STATUS_CODE(ret);
-}
-
-int rtw_writeN(struct adapter *adapter, u32 addr, u32 length, u8 *data)
-{
- u16 value = addr & 0xffff;
-
- if (length > VENDOR_CMD_MAX_DATA_LEN)
- return -EINVAL;
-
- return usb_write(adapter, value, data, length);
-}
-
-static void handle_txrpt_ccx_88e(struct adapter *adapter, u8 *buf)
-{
- struct txrpt_ccx_88e *txrpt_ccx = (struct txrpt_ccx_88e *)buf;
-
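-	/* A CCX TX report carries the per-packet status used to ack the
-	 * pending management-frame transmission.
-	 */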
- if (txrpt_ccx->int_ccx) {
- if (txrpt_ccx->pkt_ok)
- rtw_ack_tx_done(&adapter->xmitpriv,
- RTW_SCTX_DONE_SUCCESS);
- else
- rtw_ack_tx_done(&adapter->xmitpriv,
- RTW_SCTX_DONE_CCX_PKT_FAIL);
- }
-}
-
-static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
-{
- u8 *pbuf;
- u8 shift_sz = 0;
- u16 pkt_cnt;
- u32 pkt_offset, skb_len, alloc_sz;
- s32 transfer_len;
- struct recv_stat *prxstat;
- struct phy_stat *pphy_status = NULL;
- struct sk_buff *pkt_copy = NULL;
- struct recv_frame *precvframe = NULL;
- struct rx_pkt_attrib *pattrib = NULL;
- struct hal_data_8188e *haldata = &adapt->haldata;
- struct recv_priv *precvpriv = &adapt->recvpriv;
- struct __queue *pfree_recv_queue = &precvpriv->free_recv_queue;
-
- transfer_len = (s32)pskb->len;
- pbuf = pskb->data;
-
- prxstat = (struct recv_stat *)pbuf;
- pkt_cnt = (le32_to_cpu(prxstat->rxdw2) >> 16) & 0xff;
-
- do {
- prxstat = (struct recv_stat *)pbuf;
-
- precvframe = rtw_alloc_recvframe(pfree_recv_queue);
- if (!precvframe)
- goto _exit_recvbuf2recvframe;
-
- INIT_LIST_HEAD(&precvframe->list);
- precvframe->precvbuf = NULL; /* can't access the precvbuf for new arch. */
- precvframe->len = 0;
-
- update_recvframe_attrib_88e(precvframe, prxstat);
-
- pattrib = &precvframe->attrib;
-
- if ((pattrib->crc_err) || (pattrib->icv_err)) {
- rtw_free_recvframe(precvframe, pfree_recv_queue);
- goto _exit_recvbuf2recvframe;
- }
-
- if ((pattrib->physt) && (pattrib->pkt_rpt_type == NORMAL_RX))
- pphy_status = (struct phy_stat *)(pbuf + RXDESC_OFFSET);
-
- pkt_offset = RXDESC_SIZE + pattrib->drvinfo_sz + pattrib->shift_sz + pattrib->pkt_len;
-
- if ((pattrib->pkt_len <= 0) || (pkt_offset > transfer_len)) {
- rtw_free_recvframe(precvframe, pfree_recv_queue);
- goto _exit_recvbuf2recvframe;
- }
-
- /* Modified by Albert 20101213 */
-		/* For 8-byte IP header alignment. */
-		if (pattrib->qos) /* QoS data; wireless LAN header length is 26 */
- shift_sz = 6;
- else
- shift_sz = 0;
-
- skb_len = pattrib->pkt_len;
-
-		/* For the first fragment of a packet, the driver needs to allocate 1536+drvinfo_sz+RXDESC_SIZE to defrag the packet. */
-		/* modify alloc_sz for receiving CRC-error packets, by thomas 2011-06-02 */
- if ((pattrib->mfrag == 1) && (pattrib->frag_num == 0)) {
- if (skb_len <= 1650)
- alloc_sz = 1664;
- else
- alloc_sz = skb_len + 14;
- } else {
- alloc_sz = skb_len;
- /* 6 is for IP header 8 bytes alignment in QoS packet case. */
- /* 8 is for skb->data 4 bytes alignment. */
- alloc_sz += 14;
- }
-
- pkt_copy = netdev_alloc_skb(adapt->pnetdev, alloc_sz);
- if (pkt_copy) {
- precvframe->pkt = pkt_copy;
- precvframe->rx_head = pkt_copy->data;
- precvframe->rx_end = pkt_copy->data + alloc_sz;
- skb_reserve(pkt_copy, 8 - ((size_t)(pkt_copy->data) & 7));/* force pkt_copy->data at 8-byte alignment address */
- skb_reserve(pkt_copy, shift_sz);/* force ip_hdr at 8-byte alignment address according to shift_sz. */
- memcpy(pkt_copy->data, (pbuf + pattrib->drvinfo_sz + RXDESC_SIZE), skb_len);
- precvframe->rx_tail = pkt_copy->data;
- precvframe->rx_data = pkt_copy->data;
- } else {
- if ((pattrib->mfrag == 1) && (pattrib->frag_num == 0)) {
- rtw_free_recvframe(precvframe, pfree_recv_queue);
- goto _exit_recvbuf2recvframe;
- }
- precvframe->pkt = skb_clone(pskb, GFP_ATOMIC);
- if (precvframe->pkt) {
- precvframe->rx_tail = pbuf + pattrib->drvinfo_sz + RXDESC_SIZE;
- precvframe->rx_head = precvframe->rx_tail;
- precvframe->rx_data = precvframe->rx_tail;
- precvframe->rx_end = pbuf + pattrib->drvinfo_sz + RXDESC_SIZE + alloc_sz;
- } else {
- rtw_free_recvframe(precvframe, pfree_recv_queue);
- goto _exit_recvbuf2recvframe;
- }
- }
-
- recvframe_put(precvframe, skb_len);
-
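-		/* Packets aggregated into one USB transfer start on 128-byte
-		 * boundaries, so round the offset up before advancing.
-		 */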
- pkt_offset = (u16)round_up(pkt_offset, 128);
-
- if (pattrib->pkt_rpt_type == NORMAL_RX) { /* Normal rx packet */
- if (pattrib->physt)
- update_recvframe_phyinfo_88e(precvframe, (struct phy_stat *)pphy_status);
- rtw_recv_entry(precvframe);
- } else {
- /* enqueue recvframe to txrtp queue */
- if (pattrib->pkt_rpt_type == TX_REPORT1) {
- /* CCX-TXRPT ack for xmit mgmt frames. */
- handle_txrpt_ccx_88e(adapt, precvframe->rx_data);
- } else if (pattrib->pkt_rpt_type == TX_REPORT2) {
- ODM_RA_TxRPT2Handle_8188E(
- &haldata->odmpriv,
- precvframe->rx_data,
- pattrib->pkt_len,
- pattrib->MacIDValidEntry[0],
- pattrib->MacIDValidEntry[1]
- );
- }
- rtw_free_recvframe(precvframe, pfree_recv_queue);
- }
- pkt_cnt--;
- transfer_len -= pkt_offset;
- pbuf += pkt_offset;
- precvframe = NULL;
- pkt_copy = NULL;
-
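-		/* If the reported packet count runs out while data remains,
-		 * re-read it from the current RX descriptor.
-		 */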
- if (transfer_len > 0 && pkt_cnt == 0)
- pkt_cnt = (le32_to_cpu(prxstat->rxdw2) >> 16) & 0xff;
-
- } while ((transfer_len > 0) && (pkt_cnt > 0));
-
-_exit_recvbuf2recvframe:
-
- return _SUCCESS;
-}
-
-void rtl8188eu_recv_tasklet(unsigned long priv)
-{
- struct sk_buff *pskb;
- struct adapter *adapt = (struct adapter *)priv;
- struct recv_priv *precvpriv = &adapt->recvpriv;
-
- while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {
- if ((adapt->bDriverStopped) || (adapt->bSurpriseRemoved)) {
- dev_kfree_skb_any(pskb);
- break;
- }
- recvbuf2recvframe(adapt, pskb);
- skb_reset_tail_pointer(pskb);
- pskb->len = 0;
- skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
- }
-}
-
-static void usb_read_port_complete(struct urb *purb)
-{
- struct recv_buf *precvbuf = (struct recv_buf *)purb->context;
- struct adapter *adapt = (struct adapter *)precvbuf->adapter;
- struct recv_priv *precvpriv = &adapt->recvpriv;
-
- precvpriv->rx_pending_cnt--;
-
- if (adapt->bSurpriseRemoved || adapt->bDriverStopped || adapt->bReadPortCancel) {
- precvbuf->reuse = true;
- return;
- }
-
- if (purb->status == 0) { /* SUCCESS */
- if ((purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)) {
- precvbuf->reuse = true;
- rtw_read_port(adapt, precvbuf);
- } else {
- rtw_reset_continual_urb_error(adapter_to_dvobj(adapt));
-
- skb_put(precvbuf->pskb, purb->actual_length);
- skb_queue_tail(&precvpriv->rx_skb_queue, precvbuf->pskb);
-
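-			/* Schedule the recv tasklet only when the queue was
-			 * empty; a non-empty queue means it is already due to
-			 * run and will drain this skb as well.
-			 */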
- if (skb_queue_len(&precvpriv->rx_skb_queue) <= 1)
- tasklet_schedule(&precvpriv->recv_tasklet);
-
- precvbuf->pskb = NULL;
- precvbuf->reuse = false;
- rtw_read_port(adapt, precvbuf);
- }
- } else {
- skb_put(precvbuf->pskb, purb->actual_length);
- precvbuf->pskb = NULL;
-
- if (rtw_inc_and_chk_continual_urb_error(adapter_to_dvobj(adapt)))
- adapt->bSurpriseRemoved = true;
-
- switch (purb->status) {
- case -EINVAL:
- case -EPIPE:
- case -ENODEV:
- case -ESHUTDOWN:
- case -ENOENT:
- adapt->bDriverStopped = true;
- break;
- case -EPROTO:
- case -EOVERFLOW:
- precvbuf->reuse = true;
- rtw_read_port(adapt, precvbuf);
- break;
- case -EINPROGRESS:
- break;
- default:
- break;
- }
- }
-}
-
-int rtw_read_port(struct adapter *adapter, struct recv_buf *precvbuf)
-{
- struct urb *purb = NULL;
- struct dvobj_priv *pdvobj = adapter_to_dvobj(adapter);
- struct recv_priv *precvpriv = &adapter->recvpriv;
- struct usb_device *pusbd = pdvobj->pusbdev;
- int err;
- unsigned int pipe;
- size_t tmpaddr = 0;
- size_t alignment = 0;
-
- if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
- return -EPERM;
-
- if (!precvbuf)
- return -ENOMEM;
-
- if (!precvbuf->reuse || !precvbuf->pskb) {
- precvbuf->pskb = skb_dequeue(&precvpriv->free_recv_skb_queue);
- if (precvbuf->pskb)
- precvbuf->reuse = true;
- }
-
- /* re-assign for linux based on skb */
- if (!precvbuf->reuse || !precvbuf->pskb) {
- precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
- if (!precvbuf->pskb)
- return -ENOMEM;
-
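-		/* Align skb->data to a RECVBUFF_ALIGN_SZ boundary for the
-		 * bulk-in transfer buffer.
-		 */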
- tmpaddr = (size_t)precvbuf->pskb->data;
- alignment = tmpaddr & (RECVBUFF_ALIGN_SZ - 1);
- skb_reserve(precvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
- } else { /* reuse skb */
- precvbuf->reuse = false;
- }
-
- precvpriv->rx_pending_cnt++;
-
- purb = precvbuf->purb;
-
- /* translate DMA FIFO addr to pipehandle */
- pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe);
-
- usb_fill_bulk_urb(purb, pusbd, pipe,
- precvbuf->pskb->data,
- MAX_RECVBUF_SZ,
- usb_read_port_complete,
- precvbuf);/* context is precvbuf */
-
- err = usb_submit_urb(purb, GFP_ATOMIC);
- if ((err) && (err != (-EPERM)))
- return err;
-
- return 0;
-}
-
-void rtl8188eu_xmit_tasklet(unsigned long priv)
-{
- struct adapter *adapt = (struct adapter *)priv;
-
- if (check_fwstate(&adapt->mlmepriv, _FW_UNDER_SURVEY))
- return;
-
- do {
- if (adapt->bDriverStopped || adapt->bSurpriseRemoved || adapt->bWritePortCancel)
- break;
- } while (rtl8188eu_xmitframe_complete(adapt));
-}
diff --git a/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h
deleted file mode 100644
index 4a0b782c33be..000000000000
--- a/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __INC_HAL8188EPHYCFG_H__
-#define __INC_HAL8188EPHYCFG_H__
-
-#define MAX_AGGR_NUM 0x07
-
-enum rf_radio_path {
- RF_PATH_A = 0, /* Radio Path A */
- RF_PATH_B = 1, /* Radio Path B */
-};
-
-#define MAX_PG_GROUP 13
-
-#define RF_PATH_MAX 3
-#define MAX_TX_COUNT 4 /* path numbers */
-
-#define CHANNEL_MAX_NUMBER 14 /* 14 is the max chnl number */
-#define	MAX_CHNL_GROUP_24G	6	/* ch1~2, ch3~5, ch6~8,
-					 * ch9~11, ch12~13, ch14:
-					 * six groups in total */
-
-struct bb_reg_def {
- u32 rfintfs; /* set software control: */
- /* 0x870~0x877[8 bytes] */
- u32 rfintfi; /* readback data: */
- /* 0x8e0~0x8e7[8 bytes] */
- u32 rfintfo; /* output data: */
- /* 0x860~0x86f [16 bytes] */
- u32 rfintfe; /* output enable: */
- /* 0x860~0x86f [16 bytes] */
- u32 rf3wireOffset; /* LSSI data: */
- /* 0x840~0x84f [16 bytes] */
- u32 rfLSSI_Select; /* BB Band Select: */
- /* 0x878~0x87f [8 bytes] */
- u32 rfTxGainStage; /* Tx gain stage: */
- /* 0x80c~0x80f [4 bytes] */
- u32 rfHSSIPara1; /* wire parameter control1 : */
- /* 0x820~0x823,0x828~0x82b,
- * 0x830~0x833, 0x838~0x83b [16 bytes] */
- u32 rfHSSIPara2; /* wire parameter control2 : */
- /* 0x824~0x827,0x82c~0x82f, 0x834~0x837,
- * 0x83c~0x83f [16 bytes] */
- u32 rfSwitchControl; /* Tx Rx antenna control : */
- /* 0x858~0x85f [16 bytes] */
- u32 rfAGCControl1; /* AGC parameter control1 : */
- /* 0xc50~0xc53,0xc58~0xc5b, 0xc60~0xc63,
- * 0xc68~0xc6b [16 bytes] */
- u32 rfAGCControl2; /* AGC parameter control2 : */
- /* 0xc54~0xc57,0xc5c~0xc5f, 0xc64~0xc67,
- * 0xc6c~0xc6f [16 bytes] */
- u32 rfRxIQImbalance; /* OFDM Rx IQ imbalance matrix : */
- /* 0xc14~0xc17,0xc1c~0xc1f, 0xc24~0xc27,
- * 0xc2c~0xc2f [16 bytes] */
-	u32 rfRxAFE;		/* Rx IQ DC offset and Rx digital filter,
- * Rx DC notch filter : */
- /* 0xc10~0xc13,0xc18~0xc1b, 0xc20~0xc23,
- * 0xc28~0xc2b [16 bytes] */
- u32 rfTxIQImbalance; /* OFDM Tx IQ imbalance matrix */
- /* 0xc80~0xc83,0xc88~0xc8b, 0xc90~0xc93,
- * 0xc98~0xc9b [16 bytes] */
- u32 rfTxAFE; /* Tx IQ DC Offset and Tx DFIR type */
- /* 0xc84~0xc87,0xc8c~0xc8f, 0xc94~0xc97,
- * 0xc9c~0xc9f [16 bytes] */
- u32 rfLSSIReadBack; /* LSSI RF readback data SI mode */
- /* 0x8a0~0x8af [16 bytes] */
- u32 rfLSSIReadBackPi; /* LSSI RF readback data PI mode 0x8b8-8bc for
- * Path A and B */
-};
-
-/* BB and RF register read/write */
-u32 rtl8188e_PHY_QueryBBReg(struct adapter *adapter, u32 regaddr, u32 mask);
-void rtl8188e_PHY_SetBBReg(struct adapter *Adapter, u32 RegAddr,
- u32 mask, u32 data);
-u32 rtl8188e_PHY_QueryRFReg(struct adapter *adapter, u32 regaddr, u32 mask);
-void rtl8188e_PHY_SetRFReg(struct adapter *adapter, u32 regaddr, u32 mask, u32 data);
-
-/* Initialization related function */
-/* MAC/BB/RF HAL config */
-int PHY_MACConfig8188E(struct adapter *adapter);
-int PHY_BBConfig8188E(struct adapter *adapter);
-
-/* BB TX Power R/W */
-void PHY_SetTxPowerLevel8188E(struct adapter *adapter, u8 channel);
-
-/* Switch bandwidth for 8192S */
-void PHY_SetBWMode8188E(struct adapter *adapter,
- enum ht_channel_width chnlwidth, unsigned char offset);
-
-/* channel switch related function */
-void PHY_SwChnl8188E(struct adapter *adapter, u8 channel);
-
-void storePwrIndexDiffRateOffset(struct adapter *adapter, u32 regaddr,
- u32 mask, u32 data);
-
-#endif
diff --git a/drivers/staging/r8188eu/include/Hal8188EPhyReg.h b/drivers/staging/r8188eu/include/Hal8188EPhyReg.h
deleted file mode 100644
index da2329be4474..000000000000
--- a/drivers/staging/r8188eu/include/Hal8188EPhyReg.h
+++ /dev/null
@@ -1,1072 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __INC_HAL8188EPHYREG_H__
-#define __INC_HAL8188EPHYREG_H__
-/*--------------------------Define Parameters-------------------------------*/
-/* */
-/* BB-PHY register PMAC 0x100 PHY 0x800 - 0xEFF */
-/* 1. PMAC duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF */
-/* 2. 0x800/0x900/0xA00/0xC00/0xD00/0xE00 */
-/* 3. RF register 0x00-2E */
-/* 4. Bit Mask for BB/RF register */
-/* 5. Other definition for BB/RF R/W */
-/* */
-
-/* */
-/* 1. PMAC duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF */
-/* 1. Page1(0x100) */
-/* */
-#define rPMAC_Reset 0x100
-#define rPMAC_TxStart 0x104
-#define rPMAC_TxLegacySIG 0x108
-#define rPMAC_TxHTSIG1 0x10c
-#define rPMAC_TxHTSIG2 0x110
-#define rPMAC_PHYDebug 0x114
-#define rPMAC_TxPacketNum 0x118
-#define rPMAC_TxIdle 0x11c
-#define rPMAC_TxMACHeader0 0x120
-#define rPMAC_TxMACHeader1 0x124
-#define rPMAC_TxMACHeader2 0x128
-#define rPMAC_TxMACHeader3 0x12c
-#define rPMAC_TxMACHeader4 0x130
-#define rPMAC_TxMACHeader5 0x134
-#define rPMAC_TxDataType 0x138
-#define rPMAC_TxRandomSeed 0x13c
-#define rPMAC_CCKPLCPPreamble 0x140
-#define rPMAC_CCKPLCPHeader 0x144
-#define rPMAC_CCKCRC16 0x148
-#define rPMAC_OFDMRxCRC32OK 0x170
-#define rPMAC_OFDMRxCRC32Er 0x174
-#define rPMAC_OFDMRxParityEr 0x178
-#define rPMAC_OFDMRxCRC8Er 0x17c
-#define rPMAC_CCKCRxRC16Er 0x180
-#define rPMAC_CCKCRxRC32Er 0x184
-#define rPMAC_CCKCRxRC32OK 0x188
-#define rPMAC_TxStatus 0x18c
-
-/* 2. Page2(0x200) */
-/* The following two definition are only used for USB interface. */
-#define RF_BB_CMD_ADDR 0x02c0 /* RF/BB r/w cmd address. */
-#define RF_BB_CMD_DATA 0x02c4 /* RF/BB r/w cmd data. */
-
-/* 3. Page8(0x800) */
-#define rFPGA0_RFMOD 0x800 /* RF mode & CCK TxSC RF BW Setting */
-
-#define rFPGA0_TxInfo 0x804 /* Status report?? */
-#define rFPGA0_PSDFunction 0x808
-
-#define rFPGA0_TxGainStage 0x80c /* Set TX PWR init gain? */
-
-#define rFPGA0_RFTiming1 0x810 /* Useless now */
-#define rFPGA0_RFTiming2 0x814
-
-#define rFPGA0_XA_HSSIParameter1 0x820 /* RF 3 wire register */
-#define rFPGA0_XA_HSSIParameter2 0x824
-#define rFPGA0_XB_HSSIParameter1 0x828
-#define rFPGA0_XB_HSSIParameter2 0x82c
-
-#define rFPGA0_XA_LSSIParameter 0x840
-#define rFPGA0_XB_LSSIParameter 0x844
-
-#define rFPGA0_RFWakeUpParameter 0x850 /* Useless now */
-#define rFPGA0_RFSleepUpParameter 0x854
-
-#define rFPGA0_XAB_SwitchControl 0x858 /* RF Channel switch */
-#define rFPGA0_XCD_SwitchControl 0x85c
-
-#define rFPGA0_XA_RFInterfaceOE 0x860 /* RF Channel switch */
-#define rFPGA0_XB_RFInterfaceOE 0x864
-
-#define rFPGA0_XAB_RFInterfaceSW 0x870 /* RF Iface Software Control */
-#define rFPGA0_XCD_RFInterfaceSW 0x874
-
-#define rFPGA0_XAB_RFParameter 0x878 /* RF Parameter */
-#define rFPGA0_XCD_RFParameter 0x87c
-
-/* Crystal cap setting RF-R/W protection for parameter4?? */
-#define rFPGA0_AnalogParameter1 0x880
-#define rFPGA0_AnalogParameter2 0x884
-#define rFPGA0_AnalogParameter3 0x888
-/* enable ad/da clock1 for dual-phy */
-#define rFPGA0_AdDaClockEn 0x888
-#define rFPGA0_AnalogParameter4 0x88c
-
-#define rFPGA0_XA_LSSIReadBack 0x8a0 /* Transceiver LSSI Readback */
-#define rFPGA0_XB_LSSIReadBack 0x8a4
-#define rFPGA0_XC_LSSIReadBack 0x8a8
-#define rFPGA0_XD_LSSIReadBack 0x8ac
-
-#define rFPGA0_PSDReport 0x8b4 /* Useless now */
-/* Transceiver A HSPI Readback */
-#define TransceiverA_HSPI_Readback 0x8b8
-/* Transceiver B HSPI Readback */
-#define TransceiverB_HSPI_Readback 0x8bc
-/* Useless now RF Interface Readback Value */
-#define rFPGA0_XAB_RFInterfaceRB 0x8e0
-#define rFPGA0_XCD_RFInterfaceRB 0x8e4 /* Useless now */
-
-/* 4. Page9(0x900) */
-/* RF mode & OFDM TxSC RF BW Setting?? */
-#define rFPGA1_RFMOD 0x900
-
-#define rFPGA1_TxBlock 0x904 /* Useless now */
-#define rFPGA1_DebugSelect 0x908 /* Useless now */
-#define rFPGA1_TxInfo 0x90c /* Useless now Status report */
-
-/* 5. PageA(0xA00) */
-/* Set Control channel to upper or lower - required only for 40MHz */
-#define rCCK0_System 0xa00
-
-/* Disable init gain now Select RX path by RSSI */
-#define rCCK0_AFESetting 0xa04
-/* Disable init gain now Init gain */
-#define rCCK0_CCA 0xa08
-
-/* AGC default value, saturation level Antenna Diversity, RX AGC, LNA Threshold,
- * RX LNA Threshold useless now. Not the same as 90 series */
-#define rCCK0_RxAGC1 0xa0c
-#define rCCK0_RxAGC2 0xa10 /* AGC & DAGC */
-
-#define rCCK0_RxHP 0xa14
-
-/* Timing recovery & Channel estimation threshold */
-#define rCCK0_DSPParameter1 0xa18
-#define rCCK0_DSPParameter2 0xa1c /* SQ threshold */
-
-#define rCCK0_TxFilter1 0xa20
-#define rCCK0_TxFilter2 0xa24
-#define rCCK0_DebugPort 0xa28 /* debug port and Tx filter3 */
-#define rCCK0_FalseAlarmReport 0xa2c /* 0xa2d useless now */
-#define rCCK0_TRSSIReport 0xa50
-#define rCCK0_RxReport 0xa54 /* 0xa57 */
-#define rCCK0_FACounterLower 0xa5c /* 0xa5b */
-#define rCCK0_FACounterUpper 0xa58 /* 0xa5c */
-
-/* */
-/* PageB(0xB00) */
-/* */
-#define rPdp_AntA 0xb00
-#define rPdp_AntA_4 0xb04
-#define rConfig_Pmpd_AntA 0xb28
-#define rConfig_AntA 0xb68
-#define rConfig_AntB 0xb6c
-#define rPdp_AntB 0xb70
-#define rPdp_AntB_4 0xb74
-#define rConfig_Pmpd_AntB 0xb98
-#define rAPK 0xbd8
-
-/* */
-/* 6. PageC(0xC00) */
-/* */
-#define rOFDM0_LSTF 0xc00
-
-#define rOFDM0_TRxPathEnable 0xc04
-#define rOFDM0_TRMuxPar 0xc08
-#define rOFDM0_TRSWIsolation 0xc0c
-
-/* RxIQ DC offset, Rx digital filter, DC notch filter */
-#define rOFDM0_XARxAFE 0xc10
-#define rOFDM0_XARxIQImbalance 0xc14 /* RxIQ imbalance matrix */
-#define rOFDM0_XBRxAFE 0xc18
-#define rOFDM0_XBRxIQImbalance 0xc1c
-#define rOFDM0_XCRxAFE 0xc20
-#define rOFDM0_XCRxIQImbalance 0xc24
-#define rOFDM0_XDRxAFE 0xc28
-#define rOFDM0_XDRxIQImbalance 0xc2c
-
-#define rOFDM0_RxDetector1 0xc30 /*PD,BW & SBD DM tune init gain*/
-#define rOFDM0_RxDetector2		0xc34	/* SBD & Frame Sync. */
-#define rOFDM0_RxDetector3 0xc38 /* Frame Sync. */
-#define rOFDM0_RxDetector4 0xc3c /* PD, SBD, Frame Sync & Short-GI */
-
-#define rOFDM0_RxDSP 0xc40 /* Rx Sync Path */
-#define rOFDM0_CFOandDAGC 0xc44 /* CFO & DAGC */
-#define rOFDM0_CCADropThreshold 0xc48 /* CCA Drop threshold */
-#define rOFDM0_ECCAThreshold 0xc4c /* energy CCA */
-
-#define rOFDM0_XAAGCCore1 0xc50 /* DIG */
-#define rOFDM0_XAAGCCore2 0xc54
-#define rOFDM0_XBAGCCore1 0xc58
-#define rOFDM0_XBAGCCore2 0xc5c
-#define rOFDM0_XCAGCCore1 0xc60
-#define rOFDM0_XCAGCCore2 0xc64
-#define rOFDM0_XDAGCCore1 0xc68
-#define rOFDM0_XDAGCCore2 0xc6c
-
-#define rOFDM0_AGCParameter1 0xc70
-#define rOFDM0_AGCParameter2 0xc74
-#define rOFDM0_AGCRSSITable 0xc78
-#define rOFDM0_HTSTFAGC 0xc7c
-
-#define rOFDM0_XATxIQImbalance 0xc80 /* TX PWR TRACK and DIG */
-#define rOFDM0_XATxAFE 0xc84
-#define rOFDM0_XBTxIQImbalance 0xc88
-#define rOFDM0_XBTxAFE 0xc8c
-#define rOFDM0_XCTxIQImbalance 0xc90
-#define rOFDM0_XCTxAFE 0xc94
-#define rOFDM0_XDTxIQImbalance 0xc98
-#define rOFDM0_XDTxAFE 0xc9c
-
-#define rOFDM0_RxIQExtAnta 0xca0
-#define rOFDM0_TxCoeff1 0xca4
-#define rOFDM0_TxCoeff2 0xca8
-#define rOFDM0_TxCoeff3 0xcac
-#define rOFDM0_TxCoeff4 0xcb0
-#define rOFDM0_TxCoeff5 0xcb4
-#define rOFDM0_TxCoeff6 0xcb8
-#define rOFDM0_RxHPParameter 0xce0
-#define rOFDM0_TxPseudoNoiseWgt 0xce4
-#define rOFDM0_FrameSync 0xcf0
-#define rOFDM0_DFSReport 0xcf4
-
-/* */
-/* 7. PageD(0xD00) */
-/* */
-#define rOFDM1_LSTF 0xd00
-#define rOFDM1_TRxPathEnable 0xd04
-
-#define rOFDM1_CFO 0xd08 /* No setting now */
-#define rOFDM1_CSI1 0xd10
-#define rOFDM1_SBD 0xd14
-#define rOFDM1_CSI2 0xd18
-#define rOFDM1_CFOTracking 0xd2c
-#define rOFDM1_TRxMesaure1 0xd34
-#define rOFDM1_IntfDet 0xd3c
-#define rOFDM1_PseudoNoiseStateAB 0xd50
-#define rOFDM1_PseudoNoiseStateCD 0xd54
-#define rOFDM1_RxPseudoNoiseWgt 0xd58
-
-#define rOFDM_PHYCounter1 0xda0 /* cca, parity fail */
-#define rOFDM_PHYCounter2 0xda4 /* rate illegal, crc8 fail */
-#define rOFDM_PHYCounter3 0xda8 /* MCS not support */
-
-#define rOFDM_ShortCFOAB 0xdac /* No setting now */
-#define rOFDM_ShortCFOCD 0xdb0
-#define rOFDM_LongCFOAB 0xdb4
-#define rOFDM_LongCFOCD 0xdb8
-#define rOFDM_TailCFOAB 0xdbc
-#define rOFDM_TailCFOCD 0xdc0
-#define rOFDM_PWMeasure1 0xdc4
-#define rOFDM_PWMeasure2 0xdc8
-#define rOFDM_BWReport 0xdcc
-#define rOFDM_AGCReport 0xdd0
-#define rOFDM_RxSNR 0xdd4
-#define rOFDM_RxEVMCSI 0xdd8
-#define rOFDM_SIGReport 0xddc
-
-/* */
-/* 8. PageE(0xE00) */
-/* */
-#define rTxAGC_A_Rate18_06 0xe00
-#define rTxAGC_A_Rate54_24 0xe04
-#define rTxAGC_A_CCK1_Mcs32 0xe08
-#define rTxAGC_A_Mcs03_Mcs00 0xe10
-#define rTxAGC_A_Mcs07_Mcs04 0xe14
-#define rTxAGC_A_Mcs11_Mcs08 0xe18
-#define rTxAGC_A_Mcs15_Mcs12 0xe1c
-
-#define rTxAGC_B_Rate18_06 0x830
-#define rTxAGC_B_Rate54_24 0x834
-#define rTxAGC_B_CCK1_55_Mcs32 0x838
-#define rTxAGC_B_Mcs03_Mcs00 0x83c
-#define rTxAGC_B_Mcs07_Mcs04 0x848
-#define rTxAGC_B_Mcs11_Mcs08 0x84c
-#define rTxAGC_B_Mcs15_Mcs12 0x868
-#define rTxAGC_B_CCK11_A_CCK2_11 0x86c
-
-#define rFPGA0_IQK 0xe28
-#define rTx_IQK_Tone_A 0xe30
-#define rRx_IQK_Tone_A 0xe34
-#define rTx_IQK_PI_A 0xe38
-#define rRx_IQK_PI_A 0xe3c
-
-#define rTx_IQK 0xe40
-#define rRx_IQK 0xe44
-#define rIQK_AGC_Pts 0xe48
-#define rIQK_AGC_Rsp 0xe4c
-#define rTx_IQK_Tone_B 0xe50
-#define rRx_IQK_Tone_B 0xe54
-#define rTx_IQK_PI_B 0xe58
-#define rRx_IQK_PI_B 0xe5c
-#define rIQK_AGC_Cont 0xe60
-
-#define rBlue_Tooth 0xe6c
-#define rRx_Wait_CCA 0xe70
-#define rTx_CCK_RFON 0xe74
-#define rTx_CCK_BBON 0xe78
-#define rTx_OFDM_RFON 0xe7c
-#define rTx_OFDM_BBON 0xe80
-#define rTx_To_Rx 0xe84
-#define rTx_To_Tx 0xe88
-#define rRx_CCK 0xe8c
-
-#define rTx_Power_Before_IQK_A 0xe94
-#define rTx_Power_After_IQK_A 0xe9c
-
-#define rRx_Power_Before_IQK_A 0xea0
-#define rRx_Power_Before_IQK_A_2 0xea4
-#define rRx_Power_After_IQK_A 0xea8
-#define rRx_Power_After_IQK_A_2 0xeac
-
-#define rTx_Power_Before_IQK_B 0xeb4
-#define rTx_Power_After_IQK_B 0xebc
-
-#define rRx_Power_Before_IQK_B 0xec0
-#define rRx_Power_Before_IQK_B_2 0xec4
-#define rRx_Power_After_IQK_B 0xec8
-#define rRx_Power_After_IQK_B_2 0xecc
-
-#define rRx_OFDM 0xed0
-#define rRx_Wait_RIFS 0xed4
-#define rRx_TO_Rx 0xed8
-#define rStandby 0xedc
-#define rSleep 0xee0
-#define rPMPD_ANAEN 0xeec
-
-/* */
-/* 7. RF Register 0x00-0x2E (RF 8256) */
-/* RF-0222D 0x00-3F */
-/* */
-/* Zebra1 */
-#define rZebra1_HSSIEnable 0x0 /* Useless now */
-#define rZebra1_TRxEnable1 0x1
-#define rZebra1_TRxEnable2 0x2
-#define rZebra1_AGC 0x4
-#define rZebra1_ChargePump 0x5
-#define rZebra1_Channel 0x7 /* RF channel switch */
-
-/* endif */
-#define rZebra1_TxGain 0x8 /* Useless now */
-#define rZebra1_TxLPF 0x9
-#define rZebra1_RxLPF 0xb
-#define rZebra1_RxHPFCorner 0xc
-
-/* Zebra4 */
-#define rGlobalCtrl 0 /* Useless now */
-#define rRTL8256_TxLPF 19
-#define rRTL8256_RxLPF 11
-
-/* RTL8258 */
-#define rRTL8258_TxLPF 0x11 /* Useless now */
-#define rRTL8258_RxLPF 0x13
-#define rRTL8258_RSSILPF 0xa
-
-/* */
-/* RL6052 Register definition */
-/* */
-#define RF_AC 0x00 /* */
-
-#define RF_IQADJ_G1 0x01 /* */
-#define RF_IQADJ_G2 0x02 /* */
-
-#define RF_POW_TRSW 0x05 /* */
-
-#define RF_GAIN_RX 0x06 /* */
-#define RF_GAIN_TX 0x07 /* */
-
-#define RF_TXM_IDAC 0x08 /* */
-#define RF_IPA_G 0x09 /* */
-#define RF_TXBIAS_G 0x0A
-#define RF_TXPA_AG 0x0B
-#define RF_IPA_A 0x0C /* */
-#define RF_TXBIAS_A 0x0D
-#define RF_BS_PA_APSET_G9_G11 0x0E
-#define RF_BS_IQGEN 0x0F /* */
-
-#define RF_MODE1 0x10 /* */
-#define RF_MODE2 0x11 /* */
-
-#define RF_RX_AGC_HP 0x12 /* */
-#define RF_TX_AGC 0x13 /* */
-#define RF_BIAS 0x14 /* */
-#define RF_IPA 0x15 /* */
-#define RF_TXBIAS 0x16
-#define RF_POW_ABILITY 0x17 /* */
-#define RF_CHNLBW 0x18 /* RF channel and BW switch */
-#define RF_TOP 0x19 /* */
-
-#define RF_RX_G1 0x1A /* */
-#define RF_RX_G2 0x1B /* */
-
-#define RF_RX_BB2 0x1C /* */
-#define RF_RX_BB1 0x1D /* */
-
-#define RF_RCK1 0x1E /* */
-#define RF_RCK2 0x1F /* */
-
-#define RF_TX_G1 0x20 /* */
-#define RF_TX_G2 0x21 /* */
-#define RF_TX_G3 0x22 /* */
-
-#define RF_TX_BB1 0x23 /* */
-
-#define RF_T_METER_92D 0x42 /* */
-#define RF_T_METER_88E 0x42 /* */
-#define RF_T_METER 0x24 /* */
-
-#define RF_SYN_G1 0x25 /* RF TX Power control */
-#define RF_SYN_G2 0x26 /* RF TX Power control */
-#define RF_SYN_G3 0x27 /* RF TX Power control */
-#define RF_SYN_G4 0x28 /* RF TX Power control */
-#define RF_SYN_G5 0x29 /* RF TX Power control */
-#define RF_SYN_G6 0x2A /* RF TX Power control */
-#define RF_SYN_G7 0x2B /* RF TX Power control */
-#define RF_SYN_G8 0x2C /* RF TX Power control */
-
-#define RF_RCK_OS 0x30 /* RF TX PA control */
-#define RF_TXPA_G1 0x31 /* RF TX PA control */
-#define RF_TXPA_G2 0x32 /* RF TX PA control */
-#define RF_TXPA_G3 0x33 /* RF TX PA control */
-#define RF_TX_BIAS_A 0x35
-#define RF_TX_BIAS_D 0x36
-#define RF_LOBF_9 0x38
-#define RF_RXRF_A3 0x3C /* */
-#define RF_TRSW 0x3F
-
-#define RF_TXRF_A2 0x41
-#define RF_TXPA_G4 0x46
-#define RF_TXPA_A4 0x4B
-#define RF_0x52 0x52
-#define RF_WE_LUT 0xEF
-
-/* */
-/* Bit Mask */
-/* */
-/* 1. Page1(0x100) */
-#define bBBResetB 0x100 /* Useless now? */
-#define bGlobalResetB 0x200
-#define bOFDMTxStart 0x4
-#define bCCKTxStart 0x8
-#define bCRC32Debug 0x100
-#define bPMACLoopback 0x10
-#define bTxLSIG 0xffffff
-#define bOFDMTxRate 0xf
-#define bOFDMTxReserved 0x10
-#define bOFDMTxLength 0x1ffe0
-#define bOFDMTxParity 0x20000
-#define bTxHTSIG1 0xffffff
-#define bTxHTMCSRate 0x7f
-#define bTxHTBW 0x80
-#define bTxHTLength 0xffff00
-#define bTxHTSIG2 0xffffff
-#define bTxHTSmoothing 0x1
-#define bTxHTSounding 0x2
-#define bTxHTReserved 0x4
-#define bTxHTAggreation 0x8
-#define bTxHTSTBC 0x30
-#define bTxHTAdvanceCoding 0x40
-#define bTxHTShortGI 0x80
-#define bTxHTNumberHT_LTF 0x300
-#define bTxHTCRC8 0x3fc00
-#define bCounterReset 0x10000
-#define bNumOfOFDMTx 0xffff
-#define bNumOfCCKTx 0xffff0000
-#define bTxIdleInterval 0xffff
-#define bOFDMService 0xffff0000
-#define bTxMACHeader 0xffffffff
-#define bTxDataInit 0xff
-#define bTxHTMode 0x100
-#define bTxDataType 0x30000
-#define bTxRandomSeed 0xffffffff
-#define bCCKTxPreamble 0x1
-#define bCCKTxSFD 0xffff0000
-#define bCCKTxSIG 0xff
-#define bCCKTxService 0xff00
-#define bCCKLengthExt 0x8000
-#define bCCKTxLength 0xffff0000
-#define bCCKTxCRC16 0xffff
-#define bCCKTxStatus 0x1
-#define bOFDMTxStatus 0x2
-
-#define IS_BB_REG_OFFSET_92S(_Offset) \
- ((_Offset >= 0x800) && (_Offset <= 0xfff))
-
-/* 2. Page8(0x800) */
-#define bRFMOD 0x1 /* Reg 0x800 rFPGA0_RFMOD */
-#define bJapanMode 0x2
-#define bCCKTxSC 0x30
-#define bCCKEn 0x1000000
-#define bOFDMEn 0x2000000
-
-#define bOFDMRxADCPhase 0x10000 /* Useless now */
-#define bOFDMTxDACPhase 0x40000
-#define bXATxAGC 0x3f
-
-#define bAntennaSelect 0x0300
-
-#define bXBTxAGC 0xf00 /* Reg 80c rFPGA0_TxGainStage */
-#define bXCTxAGC 0xf000
-#define bXDTxAGC 0xf0000
-
-#define bPAStart 0xf0000000 /* Useless now */
-#define bTRStart 0x00f00000
-#define bRFStart 0x0000f000
-#define bBBStart 0x000000f0
-#define bBBCCKStart 0x0000000f
-#define bPAEnd 0xf /* Reg0x814 */
-#define bTREnd 0x0f000000
-#define bRFEnd 0x000f0000
-#define bCCAMask 0x000000f0 /* T2R */
-#define bR2RCCAMask 0x00000f00
-#define bHSSI_R2TDelay 0xf8000000
-#define bHSSI_T2RDelay 0xf80000
-#define bContTxHSSI 0x400 /* change gain at continue Tx */
-#define bIGFromCCK 0x200
-#define bAGCAddress 0x3f
-#define bRxHPTx 0x7000
-#define bRxHPT2R 0x38000
-#define bRxHPCCKIni 0xc0000
-#define bAGCTxCode 0xc00000
-#define bAGCRxCode 0x300000
-
-/* Reg 0x820~84f rFPGA0_XA_HSSIParameter1 */
-#define b3WireDataLength 0x800
-#define b3WireAddressLength 0x400
-
-#define b3WireRFPowerDown 0x1 /* Useless now */
-#define b5GPAPEPolarity 0x40000000
-#define b2GPAPEPolarity 0x80000000
-#define bRFSW_TxDefaultAnt 0x3
-#define bRFSW_TxOptionAnt 0x30
-#define bRFSW_RxDefaultAnt 0x300
-#define bRFSW_RxOptionAnt 0x3000
-#define bRFSI_3WireData 0x1
-#define bRFSI_3WireClock 0x2
-#define bRFSI_3WireLoad 0x4
-#define bRFSI_3WireRW 0x8
-#define bRFSI_3Wire 0xf
-
-#define bRFSI_RFENV 0x10 /* Reg 0x870 rFPGA0_XAB_RFInterfaceSW */
-
-#define bRFSI_TRSW 0x20 /* Useless now */
-#define bRFSI_TRSWB 0x40
-#define bRFSI_ANTSW 0x100
-#define bRFSI_ANTSWB 0x200
-#define bRFSI_PAPE 0x400
-#define bRFSI_PAPE5G 0x800
-#define bBandSelect 0x1
-#define bHTSIG2_GI 0x80
-#define bHTSIG2_Smoothing 0x01
-#define bHTSIG2_Sounding 0x02
-#define bHTSIG2_Aggreaton 0x08
-#define bHTSIG2_STBC 0x30
-#define bHTSIG2_AdvCoding 0x40
-#define bHTSIG2_NumOfHTLTF 0x300
-#define bHTSIG2_CRC8 0x3fc
-#define bHTSIG1_MCS 0x7f
-#define bHTSIG1_BandWidth 0x80
-#define bHTSIG1_HTLength 0xffff
-#define bLSIG_Rate 0xf
-#define bLSIG_Reserved 0x10
-#define bLSIG_Length 0x1fffe
-#define bLSIG_Parity 0x20
-#define bCCKRxPhase 0x4
-
-#define bLSSIReadAddress 0x7f800000 /* T65 RF */
-
-#define bLSSIReadEdge 0x80000000 /* LSSI "Read" edge signal */
-
-#define bLSSIReadBackData 0xfffff /* T65 RF */
-
-#define bLSSIReadOKFlag 0x1000 /* Useless now */
-#define bCCKSampleRate 0x8 /* 0: 44MHz, 1:88MHz */
-#define bRegulator0Standby 0x1
-#define bRegulatorPLLStandby 0x2
-#define bRegulator1Standby 0x4
-#define bPLLPowerUp 0x8
-#define bDPLLPowerUp 0x10
-#define bDA10PowerUp 0x20
-#define bAD7PowerUp 0x200
-#define bDA6PowerUp 0x2000
-#define bXtalPowerUp 0x4000
-#define b40MDClkPowerUP 0x8000
-#define bDA6DebugMode 0x20000
-#define bDA6Swing 0x380000
-
-/* Reg 0x880 rFPGA0_AnalogParameter1 20/40 CCK support switch 40/80 BB MHZ */
-#define bADClkPhase 0x4000000
-
-#define b80MClkDelay 0x18000000 /* Useless */
-#define bAFEWatchDogEnable 0x20000000
-
-/* Reg 0x884 rFPGA0_AnalogParameter2 Crystal cap */
-#define bXtalCap01 0xc0000000
-#define bXtalCap23 0x3
-#define bXtalCap92x 0x0f000000
-#define bXtalCap 0x0f000000
-
-#define bIntDifClkEnable 0x400 /* Useless */
-#define bExtSigClkEnable 0x800
-#define bBandgapMbiasPowerUp 0x10000
-#define bAD11SHGain 0xc0000
-#define bAD11InputRange 0x700000
-#define bAD11OPCurrent 0x3800000
-#define bIPathLoopback 0x4000000
-#define bQPathLoopback 0x8000000
-#define bAFELoopback 0x10000000
-#define bDA10Swing 0x7e0
-#define bDA10Reverse 0x800
-#define bDAClkSource 0x1000
-#define bAD7InputRange 0x6000
-#define bAD7Gain 0x38000
-#define bAD7OutputCMMode 0x40000
-#define bAD7InputCMMode 0x380000
-#define bAD7Current 0xc00000
-#define bRegulatorAdjust 0x7000000
-#define bAD11PowerUpAtTx 0x1
-#define bDA10PSAtTx 0x10
-#define bAD11PowerUpAtRx 0x100
-#define bDA10PSAtRx 0x1000
-#define bCCKRxAGCFormat 0x200
-#define bPSDFFTSamplepPoint 0xc000
-#define bPSDAverageNum 0x3000
-#define bIQPathControl 0xc00
-#define bPSDFreq 0x3ff
-#define bPSDAntennaPath 0x30
-#define bPSDIQSwitch 0x40
-#define bPSDRxTrigger 0x400000
-#define bPSDTxTrigger 0x80000000
-#define bPSDSineToneScale 0x7f000000
-#define bPSDReport 0xffff
-
-/* 3. Page9(0x900) */
-#define bOFDMTxSC 0x30000000 /* Useless */
-#define bCCKTxOn 0x1
-#define bOFDMTxOn 0x2
-#define bDebugPage 0xfff /* reset debug page and HWord, LWord */
-#define bDebugItem 0xff /* reset debug page and LWord */
-#define bAntL 0x10
-#define bAntNonHT 0x100
-#define bAntHT1 0x1000
-#define bAntHT2 0x10000
-#define bAntHT1S1 0x100000
-#define bAntNonHTS1 0x1000000
-
-/* 4. PageA(0xA00) */
-#define bCCKBBMode 0x3 /* Useless */
-#define bCCKTxPowerSaving 0x80
-#define bCCKRxPowerSaving 0x40
-
-#define bCCKSideBand 0x10 /* Reg 0xa00 rCCK0_System 20/40 */
-
-#define bCCKScramble 0x8 /* Useless */
-#define bCCKAntDiversity 0x8000
-#define bCCKCarrierRecovery 0x4000
-#define bCCKTxRate 0x3000
-#define bCCKDCCancel 0x0800
-#define bCCKISICancel 0x0400
-#define bCCKMatchFilter 0x0200
-#define bCCKEqualizer 0x0100
-#define bCCKPreambleDetect 0x800000
-#define bCCKFastFalseCCA 0x400000
-#define bCCKChEstStart 0x300000
-#define bCCKCCACount 0x080000
-#define bCCKcs_lim 0x070000
-#define bCCKBistMode 0x80000000
-#define bCCKCCAMask 0x40000000
-#define bCCKTxDACPhase 0x4
-#define bCCKRxADCPhase 0x20000000 /* r_rx_clk */
-#define bCCKr_cp_mode0 0x0100
-#define bCCKTxDCOffset 0xf0
-#define bCCKRxDCOffset 0xf
-#define bCCKCCAMode 0xc000
-#define bCCKFalseCS_lim 0x3f00
-#define bCCKCS_ratio 0xc00000
-#define bCCKCorgBit_sel 0x300000
-#define bCCKPD_lim 0x0f0000
-#define bCCKNewCCA 0x80000000
-#define bCCKRxHPofIG 0x8000
-#define bCCKRxIG 0x7f00
-#define bCCKLNAPolarity 0x800000
-#define bCCKRx1stGain 0x7f0000
-#define bCCKRFExtend		0x20000000 /* CCK Rx initial gain polarity */
-#define bCCKRxAGCSatLevel 0x1f000000
-#define bCCKRxAGCSatCount 0xe0
-#define bCCKRxRFSettle 0x1f /* AGCsamp_dly */
-#define bCCKFixedRxAGC 0x8000
-#define bCCKAntennaPolarity 0x2000
-#define bCCKTxFilterType 0x0c00
-#define bCCKRxAGCReportType 0x0300
-#define bCCKRxDAGCEn 0x80000000
-#define bCCKRxDAGCPeriod 0x20000000
-#define bCCKRxDAGCSatLevel 0x1f000000
-#define bCCKTimingRecovery 0x800000
-#define bCCKTxC0 0x3f0000
-#define bCCKTxC1 0x3f000000
-#define bCCKTxC2 0x3f
-#define bCCKTxC3 0x3f00
-#define bCCKTxC4 0x3f0000
-#define bCCKTxC5 0x3f000000
-#define bCCKTxC6 0x3f
-#define bCCKTxC7 0x3f00
-#define bCCKDebugPort 0xff0000
-#define bCCKDACDebug 0x0f000000
-#define bCCKFalseAlarmEnable 0x8000
-#define bCCKFalseAlarmRead 0x4000
-#define bCCKTRSSI 0x7f
-#define bCCKRxAGCReport 0xfe
-#define bCCKRxReport_AntSel 0x80000000
-#define bCCKRxReport_MFOff 0x40000000
-#define bCCKRxRxReport_SQLoss 0x20000000
-#define bCCKRxReport_Pktloss 0x10000000
-#define bCCKRxReport_Lockedbit 0x08000000
-#define bCCKRxReport_RateError 0x04000000
-#define bCCKRxReport_RxRate 0x03000000
-#define bCCKRxFACounterLower 0xff
-#define bCCKRxFACounterUpper 0xff000000
-#define bCCKRxHPAGCStart 0xe000
-#define bCCKRxHPAGCFinal 0x1c00
-#define bCCKRxFalseAlarmEnable 0x8000
-#define bCCKFACounterFreeze 0x4000
-#define bCCKTxPathSel 0x10000000
-#define bCCKDefaultRxPath 0xc000000
-#define bCCKOptionRxPath 0x3000000
-
-/* 5. PageC(0xC00) */
-#define bNumOfSTF 0x3 /* Useless */
-#define bShift_L 0xc0
-#define bGI_TH 0xc
-#define bRxPathA 0x1
-#define bRxPathB 0x2
-#define bRxPathC 0x4
-#define bRxPathD 0x8
-#define bTxPathA 0x1
-#define bTxPathB 0x2
-#define bTxPathC 0x4
-#define bTxPathD 0x8
-#define bTRSSIFreq 0x200
-#define bADCBackoff 0x3000
-#define bDFIRBackoff 0xc000
-#define bTRSSILatchPhase 0x10000
-#define bRxIDCOffset 0xff
-#define bRxQDCOffset 0xff00
-#define bRxDFIRMode 0x1800000
-#define bRxDCNFType 0xe000000
-#define bRXIQImb_A 0x3ff
-#define bRXIQImb_B 0xfc00
-#define bRXIQImb_C 0x3f0000
-#define bRXIQImb_D 0xffc00000
-#define bDC_dc_Notch 0x60000
-#define bRxNBINotch 0x1f000000
-#define bPD_TH 0xf
-#define bPD_TH_Opt2 0xc000
-#define bPWED_TH 0x700
-#define bIfMF_Win_L 0x800
-#define bPD_Option 0x1000
-#define bMF_Win_L 0xe000
-#define bBW_Search_L 0x30000
-#define bwin_enh_L 0xc0000
-#define bBW_TH 0x700000
-#define bED_TH2 0x3800000
-#define bBW_option 0x4000000
-#define bRatio_TH 0x18000000
-#define bWindow_L 0xe0000000
-#define bSBD_Option 0x1
-#define bFrame_TH 0x1c
-#define bFS_Option 0x60
-#define bDC_Slope_check 0x80
-#define bFGuard_Counter_DC_L 0xe00
-#define bFrame_Weight_Short 0x7000
-#define bSub_Tune 0xe00000
-#define bFrame_DC_Length 0xe000000
-#define bSBD_start_offset 0x30000000
-#define bFrame_TH_2 0x7
-#define bFrame_GI2_TH 0x38
-#define bGI2_Sync_en 0x40
-#define bSarch_Short_Early 0x300
-#define bSarch_Short_Late 0xc00
-#define bSarch_GI2_Late 0x70000
-#define bCFOAntSum 0x1
-#define bCFOAcc 0x2
-#define bCFOStartOffset 0xc
-#define bCFOLookBack 0x70
-#define bCFOSumWeight 0x80
-#define bDAGCEnable 0x10000
-#define bTXIQImb_A 0x3ff
-#define bTXIQImb_B 0xfc00
-#define bTXIQImb_C 0x3f0000
-#define bTXIQImb_D 0xffc00000
-#define bTxIDCOffset 0xff
-#define bTxQDCOffset 0xff00
-#define bTxDFIRMode 0x10000
-#define bTxPesudoNoiseOn 0x4000000
-#define bTxPesudoNoise_A 0xff
-#define bTxPesudoNoise_B 0xff00
-#define bTxPesudoNoise_C 0xff0000
-#define bTxPesudoNoise_D 0xff000000
-#define bCCADropOption 0x20000
-#define bCCADropThres 0xfff00000
-#define bEDCCA_H 0xf
-#define bEDCCA_L 0xf0
-#define bLambda_ED 0x300
-#define bRxInitialGain 0x7f
-#define bRxAntDivEn 0x80
-#define bRxAGCAddressForLNA 0x7f00
-#define bRxHighPowerFlow 0x8000
-#define bRxAGCFreezeThres 0xc0000
-#define bRxFreezeStep_AGC1 0x300000
-#define bRxFreezeStep_AGC2 0xc00000
-#define bRxFreezeStep_AGC3 0x3000000
-#define bRxFreezeStep_AGC0 0xc000000
-#define bRxRssi_Cmp_En 0x10000000
-#define bRxQuickAGCEn 0x20000000
-#define bRxAGCFreezeThresMode 0x40000000
-#define bRxOverFlowCheckType 0x80000000
-#define bRxAGCShift 0x7f
-#define bTRSW_Tri_Only 0x80
-#define bPowerThres 0x300
-#define bRxAGCEn 0x1
-#define bRxAGCTogetherEn 0x2
-#define bRxAGCMin 0x4
-#define bRxHP_Ini 0x7
-#define bRxHP_TRLNA 0x70
-#define bRxHP_RSSI 0x700
-#define bRxHP_BBP1 0x7000
-#define bRxHP_BBP2 0x70000
-#define bRxHP_BBP3 0x700000
-#define bRSSI_H 0x7f0000 /* threshold for high power */
-#define bRSSI_Gen 0x7f000000 /* threshold for ant diversity */
-#define bRxSettle_TRSW 0x7
-#define bRxSettle_LNA 0x38
-#define bRxSettle_RSSI 0x1c0
-#define bRxSettle_BBP 0xe00
-#define bRxSettle_RxHP 0x7000
-#define bRxSettle_AntSW_RSSI 0x38000
-#define bRxSettle_AntSW 0xc0000
-#define bRxProcessTime_DAGC 0x300000
-#define bRxSettle_HSSI 0x400000
-#define bRxProcessTime_BBPPW 0x800000
-#define bRxAntennaPowerShift 0x3000000
-#define bRSSITableSelect 0xc000000
-#define bRxHP_Final 0x7000000
-#define bRxHTSettle_BBP 0x7
-#define bRxHTSettle_HSSI 0x8
-#define bRxHTSettle_RxHP 0x70
-#define bRxHTSettle_BBPPW 0x80
-#define bRxHTSettle_Idle 0x300
-#define bRxHTSettle_Reserved 0x1c00
-#define bRxHTRxHPEn 0x8000
-#define bRxHTAGCFreezeThres 0x30000
-#define bRxHTAGCTogetherEn 0x40000
-#define bRxHTAGCMin 0x80000
-#define bRxHTAGCEn 0x100000
-#define bRxHTDAGCEn 0x200000
-#define bRxHTRxHP_BBP 0x1c00000
-#define bRxHTRxHP_Final 0xe0000000
-#define bRxPWRatioTH 0x3
-#define bRxPWRatioEn 0x4
-#define bRxMFHold 0x3800
-#define bRxPD_Delay_TH1 0x38
-#define bRxPD_Delay_TH2 0x1c0
-#define bRxPD_DC_COUNT_MAX 0x600
-#define bRxPD_Delay_TH 0x8000
-#define bRxProcess_Delay 0xf0000
-#define bRxSearchrange_GI2_Early 0x700000
-#define bRxFrame_Guard_Counter_L 0x3800000
-#define bRxSGI_Guard_L 0xc000000
-#define bRxSGI_Search_L 0x30000000
-#define bRxSGI_TH 0xc0000000
-#define bDFSCnt0 0xff
-#define bDFSCnt1 0xff00
-#define bDFSFlag 0xf0000
-#define bMFWeightSum 0x300000
-#define bMinIdxTH 0x7f000000
-#define bDAFormat 0x40000
-#define bTxChEmuEnable 0x01000000
-#define bTRSWIsolation_A 0x7f
-#define bTRSWIsolation_B 0x7f00
-#define bTRSWIsolation_C 0x7f0000
-#define bTRSWIsolation_D 0x7f000000
-#define bExtLNAGain 0x7c00
-
-/* 6. PageE(0xE00) */
-#define bSTBCEn 0x4 /* Useless */
-#define bAntennaMapping 0x10
-#define bNss 0x20
-#define bCFOAntSumD 0x200
-#define bPHYCounterReset 0x8000000
-#define bCFOReportGet 0x4000000
-#define bOFDMContinueTx 0x10000000
-#define bOFDMSingleCarrier 0x20000000
-#define bOFDMSingleTone 0x40000000
-#define bHTDetect 0x100
-#define bCFOEn 0x10000
-#define bCFOValue 0xfff00000
-#define bSigTone_Re 0x3f
-#define bSigTone_Im 0x7f00
-#define bCounter_CCA 0xffff
-#define bCounter_ParityFail 0xffff0000
-#define bCounter_RateIllegal 0xffff
-#define bCounter_CRC8Fail 0xffff0000
-#define bCounter_MCSNoSupport 0xffff
-#define bCounter_FastSync 0xffff
-#define bShortCFO 0xfff
-#define bShortCFOTLength 12 /* total */
-#define bShortCFOFLength 11 /* fraction */
-#define bLongCFO 0x7ff
-#define bLongCFOTLength 11
-#define bLongCFOFLength 11
-#define bTailCFO 0x1fff
-#define bTailCFOTLength 13
-#define bTailCFOFLength 12
-#define bmax_en_pwdB 0xffff
-#define bCC_power_dB 0xffff0000
-#define bnoise_pwdB 0xffff
-#define bPowerMeasTLength 10
-#define bPowerMeasFLength 3
-#define bRx_HT_BW 0x1
-#define bRxSC 0x6
-#define bRx_HT 0x8
-#define bNB_intf_det_on 0x1
-#define bIntf_win_len_cfg 0x30
-#define bNB_Intf_TH_cfg 0x1c0
-#define bRFGain 0x3f
-#define bTableSel 0x40
-#define bTRSW 0x80
-#define bRxSNR_A 0xff
-#define bRxSNR_B 0xff00
-#define bRxSNR_C 0xff0000
-#define bRxSNR_D 0xff000000
-#define bSNREVMTLength 8
-#define bSNREVMFLength 1
-#define bCSI1st 0xff
-#define bCSI2nd 0xff00
-#define bRxEVM1st 0xff0000
-#define bRxEVM2nd 0xff000000
-#define bSIGEVM 0xff
-#define bPWDB 0xff00
-#define bSGIEN 0x10000
-
-#define bSFactorQAM1 0xf /* Useless */
-#define bSFactorQAM2 0xf0
-#define bSFactorQAM3 0xf00
-#define bSFactorQAM4 0xf000
-#define bSFactorQAM5 0xf0000
-#define bSFactorQAM6 0xf0000
-#define bSFactorQAM7 0xf00000
-#define bSFactorQAM8 0xf000000
-#define bSFactorQAM9 0xf0000000
-#define bCSIScheme 0x100000
-
-#define bNoiseLvlTopSet 0x3 /* Useless */
-#define bChSmooth 0x4
-#define bChSmoothCfg1 0x38
-#define bChSmoothCfg2 0x1c0
-#define bChSmoothCfg3 0xe00
-#define bChSmoothCfg4 0x7000
-#define bMRCMode 0x800000
-#define bTHEVMCfg 0x7000000
-
-#define bLoopFitType 0x1 /* Useless */
-#define bUpdCFO 0x40
-#define bUpdCFOOffData 0x80
-#define bAdvUpdCFO 0x100
-#define bAdvTimeCtrl 0x800
-#define bUpdClko 0x1000
-#define bFC 0x6000
-#define bTrackingMode 0x8000
-#define bPhCmpEnable 0x10000
-#define bUpdClkoLTF 0x20000
-#define bComChCFO 0x40000
-#define bCSIEstiMode 0x80000
-#define bAdvUpdEqz 0x100000
-#define bUChCfg 0x7000000
-#define bUpdEqz 0x8000000
-
-/* Rx Pseudo noise */
-#define bRxPesudoNoiseOn 0x20000000 /* Useless */
-#define bRxPesudoNoise_A 0xff
-#define bRxPesudoNoise_B 0xff00
-#define bRxPesudoNoise_C 0xff0000
-#define bRxPesudoNoise_D 0xff000000
-#define bPesudoNoiseState_A 0xffff
-#define bPesudoNoiseState_B 0xffff0000
-#define bPesudoNoiseState_C 0xffff
-#define bPesudoNoiseState_D 0xffff0000
-
-/* 7. RF Register */
-/* Zebra1 */
-#define bZebra1_HSSIEnable 0x8 /* Useless */
-#define bZebra1_TRxControl 0xc00
-#define bZebra1_TRxGainSetting 0x07f
-#define bZebra1_RxCorner 0xc00
-#define bZebra1_TxChargePump 0x38
-#define bZebra1_RxChargePump 0x7
-#define bZebra1_ChannelNum 0xf80
-#define bZebra1_TxLPFBW 0x400
-#define bZebra1_RxLPFBW 0x600
-
-/* Zebra4 */
-#define bRTL8256RegModeCtrl1 0x100 /* Useless */
-#define bRTL8256RegModeCtrl0 0x40
-#define bRTL8256_TxLPFBW 0x18
-#define bRTL8256_RxLPFBW 0x600
-
-/* RTL8258 */
-#define bRTL8258_TxLPFBW 0xc /* Useless */
-#define bRTL8258_RxLPFBW 0xc00
-#define bRTL8258_RSSILPFBW 0xc0
-
-/* */
-/* Other Definition */
-/* */
-
-/* byte enable for sb_write */
-#define bByte0 0x1 /* Useless */
-#define bByte1 0x2
-#define bByte2 0x4
-#define bByte3 0x8
-#define bWord0 0x3
-#define bWord1 0xc
-#define bDWord 0xf
-
-/* for PutRegsetting & GetRegSetting BitMask */
-#define bMaskByte0 0xff /* Reg 0xc50 rOFDM0_XAAGCCore~0xC6f */
-#define bMaskByte1 0xff00
-#define bMaskByte2 0xff0000
-#define bMaskByte3 0xff000000
-#define bMaskHWord 0xffff0000
-#define bMaskLWord 0x0000ffff
-#define bMaskDWord 0xffffffff
-#define bMask12Bits 0xfff
-#define bMaskH4Bits 0xf0000000
-#define bMaskOFDM_D 0xffc00000
-#define bMaskCCK 0x3f3f3f3f
-
-/* for PutRFRegsetting & GetRFRegSetting BitMask */
-#define bRFRegOffsetMask 0xfffff
-
-#define bEnable 0x1 /* Useless */
-#define bDisable 0x0
-
-#define LeftAntenna 0x0 /* Useless */
-#define RightAntenna 0x1
-
-#define tCheckTxStatus 500 /* 500ms Useless */
-#define tUpdateRxCounter 100 /* 100ms */
-
-#define rateCCK 0 /* Useless */
-#define rateOFDM 1
-#define rateHT 2
-
-/* define Register-End */
-#define bPMAC_End 0x1ff /* Useless */
-#define bFPGAPHY0_End 0x8ff
-#define bFPGAPHY1_End 0x9ff
-#define bCCKPHY0_End 0xaff
-#define bOFDMPHY0_End 0xcff
-#define bOFDMPHY1_End 0xdff
-
-#define bPMACControl 0x0 /* Useless */
-#define bWMACControl 0x1
-#define bWNICControl 0x2
-
-#define PathA 0x0 /* Useless */
-#define PathB 0x1
-#define PathC 0x2
-#define PathD 0x3
-
-/*--------------------------Define Parameters-------------------------------*/
-
-#endif
diff --git a/drivers/staging/r8188eu/include/Hal8188ERateAdaptive.h b/drivers/staging/r8188eu/include/Hal8188ERateAdaptive.h
deleted file mode 100644
index c571ad9478ea..000000000000
--- a/drivers/staging/r8188eu/include/Hal8188ERateAdaptive.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright (c) 2011 Realtek Semiconductor Corp. */
-
-#ifndef __INC_RA_H
-#define __INC_RA_H
-/* Module Name: RateAdaptive.h
- * Abstract: Prototype of RA and related data structure.
- */
-
-#include <linux/bitfield.h>
-
-/* Rate adaptive define */
-#define PERENTRY 23
-#define RETRYSIZE 5
-#define RATESIZE 28
-#define TX_RPT2_ITEM_SIZE 8
-
-/* TX report 2 format in Rx desc */
-#define GET_TX_RPT2_DESC_PKT_LEN_88E(__rxstatusdesc) \
- le32_get_bits(*(__le32 *)__rxstatusdesc, GENMASK(8, 0))
-#define GET_TX_RPT2_DESC_MACID_VALID_1_88E(__rxstatusdesc) \
-	le32_to_cpu(*(__le32 *)(__rxstatusdesc + 16))
-#define GET_TX_RPT2_DESC_MACID_VALID_2_88E(__rxstatusdesc) \
-	le32_to_cpu(*(__le32 *)(__rxstatusdesc + 20))
-/* End rate adaptive define */
-
-int ODM_RAInfo_Init_all(struct odm_dm_struct *dm_odm);
-
-int ODM_RAInfo_Init(struct odm_dm_struct *dm_odm, u8 MacID);
-
-u8 ODM_RA_GetShortGI_8188E(struct odm_dm_struct *dm_odm, u8 MacID);
-
-u8 ODM_RA_GetDecisionRate_8188E(struct odm_dm_struct *dm_odm, u8 MacID);
-
-u8 ODM_RA_GetHwPwrStatus_8188E(struct odm_dm_struct *dm_odm, u8 MacID);
-void ODM_RA_UpdateRateInfo_8188E(struct odm_dm_struct *dm_odm, u8 MacID,
- u8 RateID, u32 RateMask,
- u8 SGIEnable);
-
-void ODM_RA_SetRSSI_8188E(struct odm_dm_struct *dm_odm, u8 macid,
- u8 rssi);
-
-void ODM_RA_TxRPT2Handle_8188E(struct odm_dm_struct *dm_odm,
- u8 *txrpt_buf, u16 txrpt_len,
- u32 validentry0, u32 validentry1);
-
-void ODM_RA_Set_TxRPT_Time(struct odm_dm_struct *dm_odm, u16 minRptTime);
-
-#endif
diff --git a/drivers/staging/r8188eu/include/HalHWImg8188E_BB.h b/drivers/staging/r8188eu/include/HalHWImg8188E_BB.h
deleted file mode 100644
index 0a290bc31c4d..000000000000
--- a/drivers/staging/r8188eu/include/HalHWImg8188E_BB.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __INC_BB_8188E_HW_IMG_H
-#define __INC_BB_8188E_HW_IMG_H
-
-/* static bool CheckCondition(const u32 Condition, const u32 Hex); */
-
-/******************************************************************************
-* AGC_TAB_1T.TXT
-******************************************************************************/
-
-int ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *odm);
-
-/******************************************************************************
-* PHY_REG_1T.TXT
-******************************************************************************/
-
-int ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *odm);
-
-/******************************************************************************
-* PHY_REG_PG.TXT
-******************************************************************************/
-
-void ODM_ReadAndConfig_PHY_REG_PG_8188E(struct odm_dm_struct *dm_odm);
-
-#endif
diff --git a/drivers/staging/r8188eu/include/HalHWImg8188E_MAC.h b/drivers/staging/r8188eu/include/HalHWImg8188E_MAC.h
deleted file mode 100644
index b3d67c1a8050..000000000000
--- a/drivers/staging/r8188eu/include/HalHWImg8188E_MAC.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __INC_MAC_8188E_HW_IMG_H
-#define __INC_MAC_8188E_HW_IMG_H
-
-/******************************************************************************
-* MAC_REG.TXT
-******************************************************************************/
-int ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *pDM_Odm);
-
-#endif /* end of HWIMG_SUPPORT */
diff --git a/drivers/staging/r8188eu/include/HalHWImg8188E_RF.h b/drivers/staging/r8188eu/include/HalHWImg8188E_RF.h
deleted file mode 100644
index 880feadb4340..000000000000
--- a/drivers/staging/r8188eu/include/HalHWImg8188E_RF.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __INC_RF_8188E_HW_IMG_H
-#define __INC_RF_8188E_HW_IMG_H
-
-/******************************************************************************
- * RadioA_1T.TXT
- ******************************************************************************/
-
-int ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *odm);
-
-#endif /* end of HWIMG_SUPPORT */
diff --git a/drivers/staging/r8188eu/include/HalPhyRf_8188e.h b/drivers/staging/r8188eu/include/HalPhyRf_8188e.h
deleted file mode 100644
index b75a5d869c56..000000000000
--- a/drivers/staging/r8188eu/include/HalPhyRf_8188e.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __HAL_PHY_RF_8188E_H__
-#define __HAL_PHY_RF_8188E_H__
-
-/*--------------------------Define Parameters-------------------------------*/
-#define IQK_DELAY_TIME_88E 10 /* ms */
-#define index_mapping_NUM_88E 15
-#define AVG_THERMAL_NUM_88E 4
-
-void ODM_TxPwrTrackAdjust88E(struct odm_dm_struct *pDM_Odm,
- u8 Type, /* 0 = OFDM, 1 = CCK */
- u8 *pDirection,/* 1 = +(incr) 2 = -(decr) */
- u32 *pOutWriteVal); /* Tx tracking CCK/OFDM BB
- * swing index adjust */
-
-void odm_TXPowerTrackingCallback_ThermalMeter_8188E(struct adapter *Adapter);
-
-/* 1 7. IQK */
-
-void PHY_IQCalibrate_8188E(struct adapter *Adapter, bool ReCovery);
-
-/* LC calibrate */
-void PHY_LCCalibrate_8188E(struct adapter *pAdapter);
-
-/* AP calibrate */
-void PHY_DigitalPredistortion_8188E(struct adapter *pAdapter);
-
-void _PHY_SaveADDARegisters(struct adapter *pAdapter, u32 *ADDAReg,
- u32 *ADDABackup, u32 RegisterNum);
-
-void _PHY_MACSettingCalibration(struct adapter *pAdapter, u32 *MACReg,
- u32 *MACBackup);
-
-#endif /* #ifndef __HAL_PHY_RF_8188E_H__ */
diff --git a/drivers/staging/r8188eu/include/HalPwrSeqCmd.h b/drivers/staging/r8188eu/include/HalPwrSeqCmd.h
deleted file mode 100644
index 0886300d26bf..000000000000
--- a/drivers/staging/r8188eu/include/HalPwrSeqCmd.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __HALPWRSEQCMD_H__
-#define __HALPWRSEQCMD_H__
-
-#include "drv_types.h"
-
-enum r8188eu_pwr_seq {
- PWR_ON_FLOW,
- DISABLE_FLOW,
- LPS_ENTER_FLOW,
-};
-
-/* Prototype of protected function. */
-u8 HalPwrSeqCmdParsing(struct adapter *padapter, enum r8188eu_pwr_seq seq);
-
-#endif
diff --git a/drivers/staging/r8188eu/include/HalVerDef.h b/drivers/staging/r8188eu/include/HalVerDef.h
deleted file mode 100644
index 7a530c7d57eb..000000000000
--- a/drivers/staging/r8188eu/include/HalVerDef.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-#ifndef __HAL_VERSION_DEF_H__
-#define __HAL_VERSION_DEF_H__
-
-enum HAL_CHIP_TYPE {
- TEST_CHIP = 0,
- NORMAL_CHIP = 1,
-};
-
-enum HAL_CUT_VERSION {
- A_CUT_VERSION = 0,
- B_CUT_VERSION = 1,
- C_CUT_VERSION = 2,
- D_CUT_VERSION = 3,
- E_CUT_VERSION = 4,
-};
-
-enum HAL_VENDOR {
- CHIP_VENDOR_TSMC = 0,
- CHIP_VENDOR_UMC = 1,
-};
-
-struct HAL_VERSION {
- enum HAL_CHIP_TYPE ChipType;
- enum HAL_CUT_VERSION CUTVersion;
- enum HAL_VENDOR VendorType;
-};
-
-/* Get element */
-#define GET_CVID_CHIP_TYPE(version) (((version).ChipType))
-#define GET_CVID_MANUFACTUER(version) (((version).VendorType))
-
-/* HAL_CHIP_TYPE_E */
-#define IS_NORMAL_CHIP(version) \
- (GET_CVID_CHIP_TYPE(version) == NORMAL_CHIP)
-
-/* HAL_VENDOR_E */
-#define IS_CHIP_VENDOR_TSMC(version) \
- (GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_TSMC)
-
-#endif
diff --git a/drivers/staging/r8188eu/include/drv_types.h b/drivers/staging/r8188eu/include/drv_types.h
deleted file mode 100644
index 159990facb8a..000000000000
--- a/drivers/staging/r8188eu/include/drv_types.h
+++ /dev/null
@@ -1,224 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-/*-----------------------------------------------------------------------------
-
- For type defines and data structure defines
-
-------------------------------------------------------------------------------*/
-
-#ifndef __DRV_TYPES_H__
-#define __DRV_TYPES_H__
-
-#include "osdep_service.h"
-#include "wlan_bssdef.h"
-#include "rtw_ht.h"
-#include "rtw_cmd.h"
-#include "rtw_xmit.h"
-#include "rtw_recv.h"
-#include "hal_intf.h"
-#include "hal_com.h"
-#include "rtw_security.h"
-#include "rtw_pwrctrl.h"
-#include "rtw_io.h"
-#include "rtw_eeprom.h"
-#include "sta_info.h"
-#include "rtw_mlme.h"
-#include "rtw_rf.h"
-#include "rtw_event.h"
-#include "rtw_led.h"
-#include "rtw_mlme_ext.h"
-#include "rtw_p2p.h"
-#include "rtw_ap.h"
-#include "rtw_br_ext.h"
-#include "rtl8188e_hal.h"
-#include "rtw_fw.h"
-
-#define FW_RTL8188EU "rtlwifi/rtl8188eufw.bin"
-
-struct registry_priv {
- u8 rfintfs;
- u8 lbkmode;
- u8 hci;
- struct ndis_802_11_ssid ssid;
- u8 network_mode; /* infra, ad-hoc, auto */
- u8 channel;/* ad-hoc support requirement */
- u8 wireless_mode;/* A, B, G, auto */
- u8 scan_mode;/* active, passive */
- u8 radio_enable;
- u8 preamble;/* long, short, auto */
- u8 vrtl_carrier_sense;/* Enable, Disable, Auto */
- u8 vcs_type;/* RTS/CTS, CTS-to-self */
- u16 rts_thresh;
- u16 frag_thresh;
- u8 adhoc_tx_pwr;
- u8 soft_ap;
- u8 power_mgnt;
- u8 ips_mode;
- u8 smart_ps;
- u8 long_retry_lmt;
- u8 short_retry_lmt;
- u16 busy_thresh;
- u8 ack_policy;
- u8 software_encrypt;
- u8 software_decrypt;
- u8 acm_method;
- /* UAPSD */
- u8 wmm_enable;
- u8 uapsd_enable;
- u8 uapsd_max_sp;
- u8 uapsd_acbk_en;
- u8 uapsd_acbe_en;
- u8 uapsd_acvi_en;
- u8 uapsd_acvo_en;
-
- u8 led_enable;
-
- struct wlan_bssid_ex dev_network;
-
- u8 ht_enable;
- u8 cbw40_enable;
- u8 ampdu_enable;/* for tx */
- u8 rx_stbc;
- u8 ampdu_amsdu;/* A-MPDU Supports A-MSDU is permitted */
- u8 lowrate_two_xmit;
-
- u8 low_power;
-
- u8 wifi_spec;/* !turbo_mode */
-
- u8 channel_plan;
- bool bAcceptAddbaReq;
-
- u8 antdiv_cfg;
- u8 antdiv_type;
-
- u8 usbss_enable;/* 0:disable,1:enable */
- u8 hwpdn_mode;/* 0:disable,1:enable,2:decide by EFUSE config */
- u8 hwpwrp_detect;/* 0:disable,1:enable */
-
- u8 hw_wps_pbc;/* 0:disable,1:enable */
-
- u8 max_roaming_times; /* the max number driver will try */
-
- u8 fw_iol; /* enable iol without other concern */
-
- u8 enable80211d;
-
- u8 ifname[16];
- u8 if2name[16];
-
- u8 notch_filter;
-};
-
-#define MAX_CONTINUAL_URB_ERR 4
-
-struct dvobj_priv {
- struct adapter *if1;
-
-	/* For 92D, DMDP has 2 interfaces. */
- u8 InterfaceNumber;
- u8 NumInterfaces;
-
- /* In /Out Pipe information */
- int RtInPipe;
- int RtOutPipe[3];
- u8 Queue2Pipe[HW_QUEUE_ENTRY];/* for out pipe mapping */
-
- struct rt_firmware firmware;
-
-/*-------- below is for USB INTERFACE --------*/
-
- u8 RtNumOutPipes;
-
- struct usb_interface *pusbintf;
- struct usb_device *pusbdev;
-
- atomic_t continual_urb_error;
-};
-
-static inline struct device *dvobj_to_dev(struct dvobj_priv *dvobj)
-{
-	/* todo: get interface type from dvobj and then return
-	 * the dev accordingly */
- return &dvobj->pusbintf->dev;
-};
-
-struct adapter {
- int pid[3];/* process id from UI, 0:wps, 1:hostapd, 2:dhcpcd */
-
- struct dvobj_priv *dvobj;
- struct mlme_priv mlmepriv;
- struct mlme_ext_priv mlmeextpriv;
- struct cmd_priv cmdpriv;
- struct evt_priv evtpriv;
- struct xmit_priv xmitpriv;
- struct recv_priv recvpriv;
- struct sta_priv stapriv;
- struct security_priv securitypriv;
- struct registry_priv registrypriv;
- struct pwrctrl_priv pwrctrlpriv;
- struct eeprom_priv eeprompriv;
- struct led_priv ledpriv;
- struct wifidirect_info wdinfo;
-
- struct hal_data_8188e haldata;
-
- s32 bDriverStopped;
- s32 bSurpriseRemoved;
-
- u8 hw_init_completed;
- s8 signal_strength;
-
- void *cmdThread;
- struct net_device *pnetdev;
-
- /* used by rtw_rereg_nd_name related function */
- struct rereg_nd_name_data {
- struct net_device *old_pnetdev;
- char old_ifname[IFNAMSIZ];
- u8 old_ips_mode;
- u8 old_bRegUseLed;
- } rereg_nd_name_priv;
-
- int bup;
- struct net_device_stats stats;
- struct iw_statistics iwstats;
-
- int net_closed;
- u8 bFWReady;
- u8 bReadPortCancel;
- u8 bWritePortCancel;
- u8 bRxRSSIDisplay;
-	/* The driver will show the desired channel number
-	 * when this flag is 1. */
- u8 bNotifyChannelChange;
- /* The driver will show the current P2P status when the
- * upper application reads it. */
- u8 bShowGetP2PState;
- struct adapter *pbuddy_adapter;
-
- struct mutex *hw_init_mutex;
-
- spinlock_t br_ext_lock;
- struct nat25_network_db_entry *nethash[NAT25_HASH_SIZE];
- int pppoe_connection_in_progress;
- unsigned char pppoe_addr[ETH_ALEN];
- unsigned char scdb_mac[ETH_ALEN];
- unsigned char scdb_ip[4];
- struct nat25_network_db_entry *scdb_entry;
- unsigned char br_mac[ETH_ALEN];
- unsigned char br_ip[4];
- struct br_ext_info ethBrExtInfo;
-};
-
-#define adapter_to_dvobj(adapter) (adapter->dvobj)
-
-void rtw_handle_dualmac(struct adapter *adapter, bool init);
-
-static inline u8 *myid(struct eeprom_priv *peepriv)
-{
- return peepriv->mac_addr;
-}
-
-#endif /* __DRV_TYPES_H__ */
diff --git a/drivers/staging/r8188eu/include/hal_com.h b/drivers/staging/r8188eu/include/hal_com.h
deleted file mode 100644
index cd3f845e146a..000000000000
--- a/drivers/staging/r8188eu/include/hal_com.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __HAL_COMMON_H__
-#define __HAL_COMMON_H__
-
-/* */
-/* Rate Definition */
-/* */
-/* CCK */
-#define RATR_1M 0x00000001
-#define RATR_2M 0x00000002
-#define RATR_55M 0x00000004
-#define RATR_11M 0x00000008
-/* OFDM */
-#define RATR_6M 0x00000010
-#define RATR_9M 0x00000020
-#define RATR_12M 0x00000040
-#define RATR_18M 0x00000080
-#define RATR_24M 0x00000100
-#define RATR_36M 0x00000200
-#define RATR_48M 0x00000400
-#define RATR_54M 0x00000800
-/* MCS 1 Spatial Stream */
-#define RATR_MCS0 0x00001000
-#define RATR_MCS1 0x00002000
-#define RATR_MCS2 0x00004000
-#define RATR_MCS3 0x00008000
-#define RATR_MCS4 0x00010000
-#define RATR_MCS5 0x00020000
-#define RATR_MCS6 0x00040000
-#define RATR_MCS7 0x00080000
-/* MCS 2 Spatial Stream */
-#define RATR_MCS8 0x00100000
-#define RATR_MCS9 0x00200000
-#define RATR_MCS10 0x00400000
-#define RATR_MCS11 0x00800000
-#define RATR_MCS12 0x01000000
-#define RATR_MCS13 0x02000000
-#define RATR_MCS14 0x04000000
-#define RATR_MCS15 0x08000000
-
-/* CCK */
-#define RATE_1M BIT(0)
-#define RATE_2M BIT(1)
-#define RATE_5_5M BIT(2)
-#define RATE_11M BIT(3)
-/* OFDM */
-#define RATE_6M BIT(4)
-#define RATE_9M BIT(5)
-#define RATE_12M BIT(6)
-#define RATE_18M BIT(7)
-#define RATE_24M BIT(8)
-#define RATE_36M BIT(9)
-#define RATE_48M BIT(10)
-#define RATE_54M BIT(11)
-/* MCS 1 Spatial Stream */
-#define RATE_MCS0 BIT(12)
-#define RATE_MCS1 BIT(13)
-#define RATE_MCS2 BIT(14)
-#define RATE_MCS3 BIT(15)
-#define RATE_MCS4 BIT(16)
-#define RATE_MCS5 BIT(17)
-#define RATE_MCS6 BIT(18)
-#define RATE_MCS7 BIT(19)
-/* MCS 2 Spatial Stream */
-#define RATE_MCS8 BIT(20)
-#define RATE_MCS9 BIT(21)
-#define RATE_MCS10 BIT(22)
-#define RATE_MCS11 BIT(23)
-#define RATE_MCS12 BIT(24)
-#define RATE_MCS13 BIT(25)
-#define RATE_MCS14 BIT(26)
-#define RATE_MCS15 BIT(27)
-
-/* ALL CCK Rate */
-#define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | RATR_11M)
-#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | RATR_18M | \
- RATR_24M | RATR_36M | RATR_48M | RATR_54M)
-#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 | \
- RATR_MCS3 | RATR_MCS4 | RATR_MCS5|RATR_MCS6 | \
- RATR_MCS7)
-#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 | \
- RATR_MCS11 | RATR_MCS12 | RATR_MCS13 | \
- RATR_MCS14 | RATR_MCS15)
-
-/*------------------------------ Tx Desc definition Macro --------------------*/
-/* pragma mark -- Tx Desc related definition. -- */
-/* Rate */
-/* CCK Rates, TxHT = 0 */
-#define DESC_RATE1M 0x00
-#define DESC_RATE2M 0x01
-#define DESC_RATE5_5M 0x02
-#define DESC_RATE11M 0x03
-
-/* OFDM Rates, TxHT = 0 */
-#define DESC_RATE6M 0x04
-#define DESC_RATE9M 0x05
-#define DESC_RATE12M 0x06
-#define DESC_RATE18M 0x07
-#define DESC_RATE24M 0x08
-#define DESC_RATE36M 0x09
-#define DESC_RATE48M 0x0a
-#define DESC_RATE54M 0x0b
-
-/* MCS Rates, TxHT = 1 */
-#define DESC_RATEMCS0 0x0c
-#define DESC_RATEMCS1 0x0d
-#define DESC_RATEMCS2 0x0e
-#define DESC_RATEMCS3 0x0f
-#define DESC_RATEMCS4 0x10
-#define DESC_RATEMCS5 0x11
-#define DESC_RATEMCS6 0x12
-#define DESC_RATEMCS7 0x13
-#define DESC_RATEMCS8 0x14
-#define DESC_RATEMCS9 0x15
-#define DESC_RATEMCS10 0x16
-#define DESC_RATEMCS11 0x17
-#define DESC_RATEMCS12 0x18
-#define DESC_RATEMCS13 0x19
-#define DESC_RATEMCS14 0x1a
-#define DESC_RATEMCS15 0x1b
-#define DESC_RATEMCS15_SG 0x1c
-#define DESC_RATEMCS32 0x20
-
-/* 1 Byte long (in unit of TU) */
-#define REG_P2P_CTWIN 0x0572
-#define REG_NOA_DESC_SEL 0x05CF
-#define REG_NOA_DESC_DURATION 0x05E0
-#define REG_NOA_DESC_INTERVAL 0x05E4
-#define REG_NOA_DESC_START 0x05E8
-#define REG_NOA_DESC_COUNT 0x05EC
-
-/* return the final channel plan decision */
-u8 hal_com_get_channel_plan(struct adapter *padapter,
- u8 hw_channel_plan,
- u8 sw_channel_plan,
- u8 def_channel_plan,
- bool AutoLoadFail
-);
-
-u8 MRateToHwRate(u8 rate);
-
-void HalSetBrateCfg(struct adapter *Adapter, u8 *mBratesOS, u16 *pBrateCfg);
-
-#endif /* __HAL_COMMON_H__ */
diff --git a/drivers/staging/r8188eu/include/hal_intf.h b/drivers/staging/r8188eu/include/hal_intf.h
deleted file mode 100644
index 296aa5b8268d..000000000000
--- a/drivers/staging/r8188eu/include/hal_intf.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#ifndef __HAL_INTF_H__
-#define __HAL_INTF_H__
-
-#include "osdep_service.h"
-#include "drv_types.h"
-#include "Hal8188EPhyCfg.h"
-
-typedef s32 (*c2h_id_filter)(u8 id);
-
-int rtl8188eu_interface_configure(struct adapter *adapt);
-int ReadAdapterInfo8188EU(struct adapter *Adapter);
-void rtl8188eu_init_default_value(struct adapter *adapt);
-void rtl8188e_SetHalODMVar(struct adapter *Adapter, void *pValue1, bool bSet);
-u32 rtl8188eu_InitPowerOn(struct adapter *adapt);
-void rtl8188e_EfusePowerSwitch(struct adapter *pAdapter, u8 PwrState);
-void rtl8188e_ReadEFuse(struct adapter *Adapter, u16 _size_byte, u8 *pbuf);
-
-void hal_notch_filter_8188e(struct adapter *adapter, bool enable);
-
-void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt);
-void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level);
-
-int rtl8188e_IOL_exec_cmds_sync(struct adapter *adapter,
- struct xmit_frame *xmit_frame, u32 max_wating_ms, u32 bndy_cnt);
-
-int rtl8188eu_inirp_init(struct adapter *Adapter);
-
-uint rtw_hal_init(struct adapter *padapter);
-uint rtw_hal_deinit(struct adapter *padapter);
-void rtw_hal_stop(struct adapter *padapter);
-
-u32 rtl8188eu_hal_init(struct adapter *Adapter);
-u32 rtl8188eu_hal_deinit(struct adapter *Adapter);
-
-void rtw_hal_update_ra_mask(struct adapter *padapter, u32 mac_id, u8 level);
-void rtw_hal_clone_data(struct adapter *dst_adapt,
- struct adapter *src_adapt);
-
-u8 rtw_do_join(struct adapter *padapter);
-
-#endif /* __HAL_INTF_H__ */
diff --git a/drivers/staging/r8188eu/include/ieee80211.h b/drivers/staging/r8188eu/include/ieee80211.h
deleted file mode 100644
index e7a4f8af497a..000000000000
--- a/drivers/staging/r8188eu/include/ieee80211.h
+++ /dev/null
@@ -1,817 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __IEEE80211_H
-#define __IEEE80211_H
-
-#include "osdep_service.h"
-#include "drv_types.h"
-#include "wifi.h"
-#include <linux/wireless.h>
-
-#define MGMT_QUEUE_NUM 5
-
-#define ETH_TYPE_LEN 2
-#define PAYLOAD_TYPE_LEN 1
-
-#define RTL_IOCTL_HOSTAPD (SIOCIWFIRSTPRIV + 28)
-
-/* STA flags */
-#define WLAN_STA_AUTH BIT(0)
-#define WLAN_STA_ASSOC BIT(1)
-#define WLAN_STA_PS BIT(2)
-#define WLAN_STA_TIM BIT(3)
-#define WLAN_STA_PERM BIT(4)
-#define WLAN_STA_AUTHORIZED BIT(5)
-#define WLAN_STA_PENDING_POLL BIT(6) /* pending activity poll not ACKed */
-#define WLAN_STA_SHORT_PREAMBLE BIT(7)
-#define WLAN_STA_PREAUTH BIT(8)
-#define WLAN_STA_WME BIT(9)
-#define WLAN_STA_MFP BIT(10)
-#define WLAN_STA_HT BIT(11)
-#define WLAN_STA_WPS BIT(12)
-#define WLAN_STA_MAYBE_WPS BIT(13)
-#define WLAN_STA_NONERP BIT(31)
-
-#define IEEE_CMD_SET_WPA_PARAM 1
-#define IEEE_CMD_SET_WPA_IE 2
-#define IEEE_CMD_SET_ENCRYPTION 3
-#define IEEE_CMD_MLME 4
-
-#define IEEE_PARAM_WPA_ENABLED 1
-#define IEEE_PARAM_TKIP_COUNTERMEASURES 2
-#define IEEE_PARAM_DROP_UNENCRYPTED 3
-#define IEEE_PARAM_PRIVACY_INVOKED 4
-#define IEEE_PARAM_AUTH_ALGS 5
-#define IEEE_PARAM_IEEE_802_1X 6
-#define IEEE_PARAM_WPAX_SELECT 7
-
-#define AUTH_ALG_OPEN_SYSTEM 0x1
-#define AUTH_ALG_SHARED_KEY 0x2
-#define AUTH_ALG_LEAP 0x00000004
-
-#define IEEE_MLME_STA_DEAUTH 1
-#define IEEE_MLME_STA_DISASSOC 2
-
-#define IEEE_CRYPT_ERR_UNKNOWN_ALG 2
-#define IEEE_CRYPT_ERR_UNKNOWN_ADDR 3
-#define IEEE_CRYPT_ERR_CRYPT_INIT_FAILED 4
-#define IEEE_CRYPT_ERR_KEY_SET_FAILED 5
-#define IEEE_CRYPT_ERR_TX_KEY_SET_FAILED 6
-#define IEEE_CRYPT_ERR_CARD_CONF_FAILED 7
-
-#define IEEE_CRYPT_ALG_NAME_LEN 16
-
-#define WPA_CIPHER_NONE BIT(0)
-#define WPA_CIPHER_WEP40 BIT(1)
-#define WPA_CIPHER_WEP104 BIT(2)
-#define WPA_CIPHER_TKIP BIT(3)
-#define WPA_CIPHER_CCMP BIT(4)
-
-
-#define WPA_SELECTOR_LEN 4
-extern u8 RTW_WPA_OUI_TYPE[];
-extern u16 RTW_WPA_VERSION;
-extern u8 WPA_AUTH_KEY_MGMT_NONE[];
-extern u8 WPA_AUTH_KEY_MGMT_UNSPEC_802_1X[];
-extern u8 WPA_AUTH_KEY_MGMT_PSK_OVER_802_1X[];
-extern u8 WPA_CIPHER_SUITE_NONE[];
-extern u8 WPA_CIPHER_SUITE_WEP40[];
-extern u8 WPA_CIPHER_SUITE_TKIP[];
-extern u8 WPA_CIPHER_SUITE_WRAP[];
-extern u8 WPA_CIPHER_SUITE_CCMP[];
-extern u8 WPA_CIPHER_SUITE_WEP104[];
-
-#define RSN_HEADER_LEN 4
-#define RSN_SELECTOR_LEN 4
-
-extern u16 RSN_VERSION_BSD;
-extern u8 RSN_AUTH_KEY_MGMT_UNSPEC_802_1X[];
-extern u8 RSN_AUTH_KEY_MGMT_PSK_OVER_802_1X[];
-extern u8 RSN_CIPHER_SUITE_NONE[];
-extern u8 RSN_CIPHER_SUITE_WEP40[];
-extern u8 RSN_CIPHER_SUITE_TKIP[];
-extern u8 RSN_CIPHER_SUITE_WRAP[];
-extern u8 RSN_CIPHER_SUITE_CCMP[];
-extern u8 RSN_CIPHER_SUITE_WEP104[];
-
-enum ratr_table_mode {
- RATR_INX_WIRELESS_NGB = 0, /* BGN 40 Mhz 2SS 1SS */
- RATR_INX_WIRELESS_NG = 1, /* GN or N */
- RATR_INX_WIRELESS_NB = 2, /* BGN 20 Mhz 2SS 1SS or BN */
- RATR_INX_WIRELESS_N = 3,
- RATR_INX_WIRELESS_GB = 4,
- RATR_INX_WIRELESS_G = 5,
- RATR_INX_WIRELESS_B = 6,
- RATR_INX_WIRELESS_MC = 7,
- RATR_INX_WIRELESS_AC_N = 8,
-};
-
-enum NETWORK_TYPE {
- WIRELESS_INVALID = 0,
- /* Sub-Element */
- WIRELESS_11B = BIT(0), /* tx:cck only, rx:cck only, hw: cck */
- WIRELESS_11G = BIT(1), /* tx:ofdm only, rx:ofdm & cck, hw:cck & ofdm*/
- WIRELESS_11_24N = BIT(3), /* tx:MCS only, rx:MCS & cck, hw:MCS & cck */
-
- /* Combination */
- /* tx: cck & ofdm, rx: cck & ofdm & MCS, hw: cck & ofdm */
- WIRELESS_11BG = (WIRELESS_11B | WIRELESS_11G),
- /* tx: ofdm & MCS, rx: ofdm & cck & MCS, hw: cck & ofdm */
- WIRELESS_11G_24N = (WIRELESS_11G | WIRELESS_11_24N),
- /* tx: ofdm & cck & MCS, rx: ofdm & cck & MCS, hw: ofdm & cck */
- WIRELESS_11BG_24N = (WIRELESS_11B | WIRELESS_11G | WIRELESS_11_24N),
-};
-
-struct ieee_param {
- u32 cmd;
- u8 sta_addr[ETH_ALEN];
- union {
- struct {
- u8 name;
- u32 value;
- } wpa_param;
- struct {
- u32 len;
- u8 reserved[32];
- u8 data[];
- } wpa_ie;
- struct {
- int command;
- int reason_code;
- } mlme;
- struct {
- u8 alg[IEEE_CRYPT_ALG_NAME_LEN];
- u8 set_tx;
- u32 err;
- u8 idx;
- u8 seq[8]; /* sequence counter (set: RX, get: TX) */
- u16 key_len;
- u8 key[];
- } crypt;
- struct {
- u16 aid;
- u16 capability;
- int flags;
- u8 tx_supp_rates[16];
- struct ieee80211_ht_cap ht_cap;
- } add_sta;
- struct {
- u8 reserved[2];/* for set max_num_sta */
- u8 buf[];
- } bcn_ie;
- } u;
-};
-
-#define IEEE80211_DATA_LEN 2304
-/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
- 6.2.1.1.2.
-
- The figure in section 7.1.2 suggests a body size of up to 2312
- bytes is allowed, which is a bit confusing, I suspect this
- represents the 2304 bytes of real data, plus a possible 8 bytes of
- WEP IV and ICV. (this interpretation suggested by Ramiro Barreiro) */
-
-#define IEEE80211_HLEN 30
-#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
-
-/* this is stolen from ipw2200 driver */
-#define IEEE_IBSS_MAC_HASH_SIZE 31
-
-#define IEEE80211_3ADDR_LEN 24
-#define IEEE80211_4ADDR_LEN 30
-#define IEEE80211_FCS_LEN 4
-
-#define MIN_FRAG_THRESHOLD 256U
-#define MAX_FRAG_THRESHOLD 2346U
-
-/* Frame control field constants */
-#define RTW_IEEE80211_FCTL_VERS 0x0003
-#define RTW_IEEE80211_FCTL_FTYPE 0x000c
-#define RTW_IEEE80211_FCTL_STYPE 0x00f0
-#define RTW_IEEE80211_FCTL_TODS 0x0100
-#define RTW_IEEE80211_FCTL_FROMDS 0x0200
-#define RTW_IEEE80211_FCTL_MOREFRAGS 0x0400
-#define RTW_IEEE80211_FCTL_RETRY 0x0800
-#define RTW_IEEE80211_FCTL_PM 0x1000
-#define RTW_IEEE80211_FCTL_MOREDATA 0x2000
-#define RTW_IEEE80211_FCTL_PROTECTED 0x4000
-#define RTW_IEEE80211_FCTL_ORDER 0x8000
-#define RTW_IEEE80211_FCTL_CTL_EXT 0x0f00
-
-#define RTW_IEEE80211_FTYPE_MGMT 0x0000
-#define RTW_IEEE80211_FTYPE_CTL 0x0004
-#define RTW_IEEE80211_FTYPE_DATA 0x0008
-#define RTW_IEEE80211_FTYPE_EXT 0x000c
-
-/* management */
-#define RTW_IEEE80211_STYPE_ASSOC_REQ 0x0000
-#define RTW_IEEE80211_STYPE_ASSOC_RESP 0x0010
-#define RTW_IEEE80211_STYPE_REASSOC_REQ 0x0020
-#define RTW_IEEE80211_STYPE_REASSOC_RESP 0x0030
-#define RTW_IEEE80211_STYPE_PROBE_REQ 0x0040
-#define RTW_IEEE80211_STYPE_PROBE_RESP 0x0050
-#define RTW_IEEE80211_STYPE_BEACON 0x0080
-#define RTW_IEEE80211_STYPE_ATIM 0x0090
-#define RTW_IEEE80211_STYPE_DISASSOC 0x00A0
-#define RTW_IEEE80211_STYPE_AUTH 0x00B0
-#define RTW_IEEE80211_STYPE_DEAUTH 0x00C0
-#define RTW_IEEE80211_STYPE_ACTION 0x00D0
-
-/* control */
-#define RTW_IEEE80211_STYPE_CTL_EXT 0x0060
-#define RTW_IEEE80211_STYPE_BACK_REQ 0x0080
-#define RTW_IEEE80211_STYPE_BACK 0x0090
-#define RTW_IEEE80211_STYPE_PSPOLL 0x00A0
-#define RTW_IEEE80211_STYPE_RTS 0x00B0
-#define RTW_IEEE80211_STYPE_CTS 0x00C0
-#define RTW_IEEE80211_STYPE_ACK 0x00D0
-#define RTW_IEEE80211_STYPE_CFEND 0x00E0
-#define RTW_IEEE80211_STYPE_CFENDACK 0x00F0
-
-/* data */
-#define RTW_IEEE80211_STYPE_DATA 0x0000
-#define RTW_IEEE80211_STYPE_DATA_CFACK 0x0010
-#define RTW_IEEE80211_STYPE_DATA_CFPOLL 0x0020
-#define RTW_IEEE80211_STYPE_DATA_CFACKPOLL 0x0030
-#define RTW_IEEE80211_STYPE_NULLFUNC 0x0040
-#define RTW_IEEE80211_STYPE_CFACK 0x0050
-#define RTW_IEEE80211_STYPE_CFPOLL 0x0060
-#define RTW_IEEE80211_STYPE_CFACKPOLL 0x0070
-#define RTW_IEEE80211_STYPE_QOS_DATA 0x0080
-#define RTW_IEEE80211_STYPE_QOS_DATA_CFACK 0x0090
-#define RTW_IEEE80211_STYPE_QOS_DATA_CFPOLL 0x00A0
-#define RTW_IEEE80211_STYPE_QOS_DATA_CFACKPOLL 0x00B0
-#define RTW_IEEE80211_STYPE_QOS_NULLFUNC 0x00C0
-#define RTW_IEEE80211_STYPE_QOS_CFACK 0x00D0
-#define RTW_IEEE80211_STYPE_QOS_CFPOLL 0x00E0
-#define RTW_IEEE80211_STYPE_QOS_CFACKPOLL 0x00F0
-
-/* sequence control field */
-#define RTW_IEEE80211_SCTL_FRAG 0x000F
-#define RTW_IEEE80211_SCTL_SEQ 0xFFF0
-
-#define RTW_ERP_INFO_NON_ERP_PRESENT BIT(0)
-#define RTW_ERP_INFO_USE_PROTECTION BIT(1)
-#define RTW_ERP_INFO_BARKER_PREAMBLE_MODE BIT(2)
-
-/* QoS, QOS */
-#define NORMAL_ACK 0
-#define NO_ACK 1
-#define NON_EXPLICIT_ACK 2
-#define BLOCK_ACK 3
-
-#ifndef ETH_P_PAE
-#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
-#endif /* ETH_P_PAE */
-
-#define ETH_P_PREAUTH 0x88C7 /* IEEE 802.11i pre-authentication */
-
-#define ETH_P_ECONET 0x0018
-
-#ifndef ETH_P_80211_RAW
-#define ETH_P_80211_RAW (ETH_P_ECONET + 1)
-#endif
-
-/* IEEE 802.11 defines */
-
-#define P80211_OUI_LEN 3
-
-struct ieee80211_snap_hdr {
- u8 dsap; /* always 0xAA */
- u8 ssap; /* always 0xAA */
- u8 ctrl; /* always 0x03 */
- u8 oui[P80211_OUI_LEN]; /* organizational universal id */
-} __packed;
-
-#define SNAP_SIZE sizeof(struct ieee80211_snap_hdr)
-
-#define WLAN_FC_GET_TYPE(fc) ((fc) & RTW_IEEE80211_FCTL_FTYPE)
-#define WLAN_FC_GET_STYPE(fc) ((fc) & RTW_IEEE80211_FCTL_STYPE)
-
-#define WLAN_QC_GET_TID(qc) ((qc) & 0x0f)
-
-#define WLAN_GET_SEQ_FRAG(seq) ((seq) & RTW_IEEE80211_SCTL_FRAG)
-#define WLAN_GET_SEQ_SEQ(seq) ((seq) & RTW_IEEE80211_SCTL_SEQ)
-
-/* Authentication algorithms */
-#define WLAN_AUTH_OPEN 0
-#define WLAN_AUTH_SHARED_KEY 1
-
-#define WLAN_AUTH_CHALLENGE_LEN 128
-
-#define WLAN_CAPABILITY_BSS (1<<0)
-#define WLAN_CAPABILITY_IBSS (1<<1)
-#define WLAN_CAPABILITY_CF_POLLABLE (1<<2)
-#define WLAN_CAPABILITY_CF_POLL_REQUEST (1<<3)
-#define WLAN_CAPABILITY_PRIVACY (1<<4)
-#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5)
-#define WLAN_CAPABILITY_PBCC (1<<6)
-#define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7)
-#define WLAN_CAPABILITY_SHORT_SLOT (1<<10)
-
-/* Status codes */
-#define WLAN_STATUS_SUCCESS 0
-#define WLAN_STATUS_UNSPECIFIED_FAILURE 1
-#define WLAN_STATUS_CAPS_UNSUPPORTED 10
-#define WLAN_STATUS_REASSOC_NO_ASSOC 11
-#define WLAN_STATUS_ASSOC_DENIED_UNSPEC 12
-#define WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG 13
-#define WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION 14
-#define WLAN_STATUS_CHALLENGE_FAIL 15
-#define WLAN_STATUS_AUTH_TIMEOUT 16
-#define WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA 17
-#define WLAN_STATUS_ASSOC_DENIED_RATES 18
-/* 802.11b */
-#define WLAN_STATUS_ASSOC_DENIED_NOSHORT 19
-#define WLAN_STATUS_ASSOC_DENIED_NOPBCC 20
-#define WLAN_STATUS_ASSOC_DENIED_NOAGILITY 21
-
-/* Reason codes */
-#define WLAN_REASON_UNSPECIFIED 1
-#define WLAN_REASON_PREV_AUTH_NOT_VALID 2
-#define WLAN_REASON_DEAUTH_LEAVING 3
-#define WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY 4
-#define WLAN_REASON_DISASSOC_AP_BUSY 5
-#define WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA 6
-#define WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA 7
-#define WLAN_REASON_DISASSOC_STA_HAS_LEFT 8
-#define WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH 9
-#define WLAN_REASON_JOIN_WRONG_CHANNEL 65534
-#define WLAN_REASON_EXPIRATION_CHK 65535
-
-/* Information Element IDs */
-#define WLAN_EID_SSID 0
-#define WLAN_EID_SUPP_RATES 1
-#define WLAN_EID_FH_PARAMS 2
-#define WLAN_EID_DS_PARAMS 3
-#define WLAN_EID_CF_PARAMS 4
-#define WLAN_EID_TIM 5
-#define WLAN_EID_IBSS_PARAMS 6
-#define WLAN_EID_CHALLENGE 16
-/* EIDs defined by IEEE 802.11h - START */
-#define WLAN_EID_PWR_CONSTRAINT 32
-#define WLAN_EID_PWR_CAPABILITY 33
-#define WLAN_EID_TPC_REQUEST 34
-#define WLAN_EID_TPC_REPORT 35
-#define WLAN_EID_SUPPORTED_CHANNELS 36
-#define WLAN_EID_CHANNEL_SWITCH 37
-#define WLAN_EID_MEASURE_REQUEST 38
-#define WLAN_EID_MEASURE_REPORT 39
-#define WLAN_EID_QUITE 40
-#define WLAN_EID_IBSS_DFS 41
-/* EIDs defined by IEEE 802.11h - END */
-#define WLAN_EID_ERP_INFO 42
-#define WLAN_EID_HT_CAP 45
-#define WLAN_EID_RSN 48
-#define WLAN_EID_EXT_SUPP_RATES 50
-#define WLAN_EID_MOBILITY_DOMAIN 54
-#define WLAN_EID_FAST_BSS_TRANSITION 55
-#define WLAN_EID_TIMEOUT_INTERVAL 56
-#define WLAN_EID_RIC_DATA 57
-#define WLAN_EID_HT_OPERATION 61
-#define WLAN_EID_SECONDARY_CHANNEL_OFFSET 62
-#define WLAN_EID_20_40_BSS_COEXISTENCE 72
-#define WLAN_EID_20_40_BSS_INTOLERANT 73
-#define WLAN_EID_OVERLAPPING_BSS_SCAN_PARAMS 74
-#define WLAN_EID_MMIE 76
-#define WLAN_EID_VENDOR_SPECIFIC 221
-#define WLAN_EID_GENERIC (WLAN_EID_VENDOR_SPECIFIC)
-
-#define IEEE80211_MGMT_HDR_LEN 24
-#define IEEE80211_DATA_HDR3_LEN 24
-#define IEEE80211_DATA_HDR4_LEN 30
-
-#define IEEE80211_STATMASK_SIGNAL (1<<0)
-#define IEEE80211_STATMASK_RSSI (1<<1)
-#define IEEE80211_STATMASK_NOISE (1<<2)
-#define IEEE80211_STATMASK_RATE (1<<3)
-#define IEEE80211_STATMASK_WEMASK 0x7
-
-#define IEEE80211_CCK_MODULATION (1<<0)
-#define IEEE80211_OFDM_MODULATION (1<<1)
-
-#define IEEE80211_24GHZ_BAND (1<<0)
-#define IEEE80211_52GHZ_BAND (1<<1)
-
-#define IEEE80211_CCK_RATE_LEN 4
-#define IEEE80211_NUM_OFDM_RATESLEN 8
-
-#define IEEE80211_CCK_RATE_1MB 0x02
-#define IEEE80211_CCK_RATE_2MB 0x04
-#define IEEE80211_CCK_RATE_5MB 0x0B
-#define IEEE80211_CCK_RATE_11MB 0x16
-#define IEEE80211_OFDM_RATE_LEN 8
-#define IEEE80211_OFDM_RATE_6MB 0x0C
-#define IEEE80211_OFDM_RATE_9MB 0x12
-#define IEEE80211_OFDM_RATE_12MB 0x18
-#define IEEE80211_OFDM_RATE_18MB 0x24
-#define IEEE80211_OFDM_RATE_24MB 0x30
-#define IEEE80211_OFDM_RATE_36MB 0x48
-#define IEEE80211_OFDM_RATE_48MB 0x60
-#define IEEE80211_OFDM_RATE_54MB 0x6C
-#define IEEE80211_BASIC_RATE_MASK 0x80
-
-#define IEEE80211_CCK_RATE_1MB_MASK (1<<0)
-#define IEEE80211_CCK_RATE_2MB_MASK (1<<1)
-#define IEEE80211_CCK_RATE_5MB_MASK (1<<2)
-#define IEEE80211_CCK_RATE_11MB_MASK (1<<3)
-#define IEEE80211_OFDM_RATE_6MB_MASK (1<<4)
-#define IEEE80211_OFDM_RATE_9MB_MASK (1<<5)
-#define IEEE80211_OFDM_RATE_12MB_MASK (1<<6)
-#define IEEE80211_OFDM_RATE_18MB_MASK (1<<7)
-#define IEEE80211_OFDM_RATE_24MB_MASK (1<<8)
-#define IEEE80211_OFDM_RATE_36MB_MASK (1<<9)
-#define IEEE80211_OFDM_RATE_48MB_MASK (1<<10)
-#define IEEE80211_OFDM_RATE_54MB_MASK (1<<11)
-
-#define IEEE80211_CCK_RATES_MASK 0x0000000F
-#define IEEE80211_CCK_BASIC_RATES_MASK (IEEE80211_CCK_RATE_1MB_MASK | \
- IEEE80211_CCK_RATE_2MB_MASK)
-#define IEEE80211_CCK_DEFAULT_RATES_MASK \
- (IEEE80211_CCK_BASIC_RATES_MASK | \
- IEEE80211_CCK_RATE_5MB_MASK | \
- IEEE80211_CCK_RATE_11MB_MASK)
-
-#define IEEE80211_OFDM_RATES_MASK 0x00000FF0
-#define IEEE80211_OFDM_BASIC_RATES_MASK (IEEE80211_OFDM_RATE_6MB_MASK | \
- IEEE80211_OFDM_RATE_12MB_MASK | \
- IEEE80211_OFDM_RATE_24MB_MASK)
-#define IEEE80211_OFDM_DEFAULT_RATES_MASK \
- (IEEE80211_OFDM_BASIC_RATES_MASK | \
- IEEE80211_OFDM_RATE_9MB_MASK | \
- IEEE80211_OFDM_RATE_18MB_MASK | \
- IEEE80211_OFDM_RATE_36MB_MASK | \
- IEEE80211_OFDM_RATE_48MB_MASK | \
- IEEE80211_OFDM_RATE_54MB_MASK)
-#define IEEE80211_DEFAULT_RATES_MASK \
- (IEEE80211_OFDM_DEFAULT_RATES_MASK | \
- IEEE80211_CCK_DEFAULT_RATES_MASK)
-
-#define IEEE80211_NUM_OFDM_RATES 8
-#define IEEE80211_NUM_CCK_RATES 4
-#define IEEE80211_OFDM_SHIFT_MASK_A 4
-
-/* IEEE 802.11 requires that STA supports concurrent reception of at least
- * three fragmented frames. This define can be increased to support more
- * concurrent frames, but it should be noted that each entry can consume about
- * 2 kB of RAM and increasing cache size will slow down frame reassembly. */
-#define IEEE80211_FRAG_CACHE_LEN 4
-
-#define SEC_KEY_1 (1<<0)
-#define SEC_KEY_2 (1<<1)
-#define SEC_KEY_3 (1<<2)
-#define SEC_KEY_4 (1<<3)
-#define SEC_ACTIVE_KEY (1<<4)
-#define SEC_AUTH_MODE (1<<5)
-#define SEC_UNICAST_GROUP (1<<6)
-#define SEC_LEVEL (1<<7)
-#define SEC_ENABLED (1<<8)
-
-#define SEC_LEVEL_0 0 /* None */
-#define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */
-#define SEC_LEVEL_2 2 /* Level 1 + TKIP */
-#define SEC_LEVEL_2_CKIP 3 /* Level 1 + CKIP */
-#define SEC_LEVEL_3 4 /* Level 2 + CCMP */
-
-#define WEP_KEYS 4
-#define WEP_KEY_LEN 13
-
-/*
-
- 802.11 data frame from AP
-
- ,-------------------------------------------------------------------.
-Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
- |------|------|---------|---------|---------|------|---------|------|
-Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | frame | fcs |
- | | tion | (BSSID) | | | ence | data | |
- `-------------------------------------------------------------------'
-
-Total: 28-2340 bytes
-
-*/
-
-#define BEACON_PROBE_SSID_ID_POSITION 12
-
-/* Management Frame Information Element Types */
-#define MFIE_TYPE_SSID 0
-#define MFIE_TYPE_RATES 1
-#define MFIE_TYPE_FH_SET 2
-#define MFIE_TYPE_DS_SET 3
-#define MFIE_TYPE_CF_SET 4
-#define MFIE_TYPE_TIM 5
-#define MFIE_TYPE_IBSS_SET 6
-#define MFIE_TYPE_CHALLENGE 16
-#define MFIE_TYPE_ERP 42
-#define MFIE_TYPE_RSN 48
-#define MFIE_TYPE_RATES_EX 50
-#define MFIE_TYPE_GENERIC 221
-
-/*
- * These are the data types that can make up management packets
- *
- u16 auth_algorithm;
- u16 auth_sequence;
- u16 beacon_interval;
- u16 capability;
- u8 current_ap[ETH_ALEN];
- u16 listen_interval;
- struct {
- u16 association_id:14, reserved:2;
- } __packed;
- u32 time_stamp[2];
- u16 reason;
- u16 status;
-*/
-
-#define IEEE80211_DEFAULT_TX_ESSID "Penguin"
-#define IEEE80211_DEFAULT_BASIC_RATE 10
-
-/* SWEEP TABLE ENTRIES NUMBER*/
-#define MAX_SWEEP_TAB_ENTRIES 42
-#define MAX_SWEEP_TAB_ENTRIES_PER_PACKET 7
-/* MAX_RATES_LENGTH needs to be 12. The spec says 8, and many APs
- * only use 8, and then use extended rates for the remaining supported
- * rates. Other APs, however, stick all of their supported rates on the
- * main rates information element... */
-#define MAX_RATES_LENGTH ((u8)12)
-#define MAX_RATES_EX_LENGTH ((u8)16)
-#define MAX_NETWORK_COUNT 128
-#define MAX_CHANNEL_NUMBER 161
-#define IEEE80211_SOFTMAC_SCAN_TIME 400
-/* HZ / 2) */
-#define IEEE80211_SOFTMAC_ASSOC_RETRY_TIME (HZ * 2)
-
-#define CRC_LENGTH 4U
-
-#define MAX_WPA_IE_LEN (256)
-#define MAX_WPS_IE_LEN (512)
-#define MAX_P2P_IE_LEN (256)
-#define MAX_WFD_IE_LEN (128)
-
-#define NETWORK_EMPTY_ESSID (1<<0)
-#define NETWORK_HAS_OFDM (1<<1)
-#define NETWORK_HAS_CCK (1<<2)
-
-#define IEEE80211_DTIM_MBCAST 4
-#define IEEE80211_DTIM_UCAST 2
-#define IEEE80211_DTIM_VALID 1
-#define IEEE80211_DTIM_INVALID 0
-
-#define IEEE80211_PS_DISABLED 0
-#define IEEE80211_PS_UNICAST IEEE80211_DTIM_UCAST
-#define IEEE80211_PS_MBCAST IEEE80211_DTIM_MBCAST
-#define IW_ESSID_MAX_SIZE 32
-/*
-join_res:
--1: authentication fail
--2: association fail
-> 0: TID
-*/
-
-#define DEFAULT_MAX_SCAN_AGE (15 * HZ)
-#define DEFAULT_FTS 2346
-
-static inline int is_multicast_mac_addr(const u8 *addr)
-{
- return ((addr[0] != 0xff) && (0x01 & addr[0]));
-}
-
-static inline int is_broadcast_mac_addr(const u8 *addr)
-{
- return (addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) &&
- (addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff);
-}
-
-#define CFG_IEEE80211_RESERVE_FCS (1<<0)
-#define CFG_IEEE80211_COMPUTE_FCS (1<<1)
-
-#define MAXTID 16
-
-/* Action category code */
-enum rtw_ieee80211_category {
- RTW_WLAN_CATEGORY_P2P = 0x7f,/* P2P action frames */
-};
-
-/* SPECTRUM_MGMT action code */
-enum rtw_ieee80211_spectrum_mgmt_actioncode {
- RTW_WLAN_ACTION_SPCT_MSR_REQ = 0,
- RTW_WLAN_ACTION_SPCT_MSR_RPRT = 1,
- RTW_WLAN_ACTION_SPCT_TPC_REQ = 2,
- RTW_WLAN_ACTION_SPCT_TPC_RPRT = 3,
- RTW_WLAN_ACTION_SPCT_CHL_SWITCH = 4,
- RTW_WLAN_ACTION_SPCT_EXT_CHL_SWITCH = 5,
-};
-
-enum _PUBLIC_ACTION {
- ACT_PUBLIC_BSSCOEXIST = 0, /* 20/40 BSS Coexistence */
- ACT_PUBLIC_DSE_ENABLE = 1,
- ACT_PUBLIC_DSE_DEENABLE = 2,
- ACT_PUBLIC_DSE_REG_LOCATION = 3,
- ACT_PUBLIC_EXT_CHL_SWITCH = 4,
- ACT_PUBLIC_DSE_MSR_REQ = 5,
- ACT_PUBLIC_DSE_MSR_RPRT = 6,
- ACT_PUBLIC_MP = 7, /* Measurement Pilot */
- ACT_PUBLIC_DSE_PWR_CONSTRAINT = 8,
- ACT_PUBLIC_VENDOR = 9, /* for WIFI_DIRECT */
- ACT_PUBLIC_GAS_INITIAL_REQ = 10,
- ACT_PUBLIC_GAS_INITIAL_RSP = 11,
- ACT_PUBLIC_GAS_COMEBACK_REQ = 12,
- ACT_PUBLIC_GAS_COMEBACK_RSP = 13,
- ACT_PUBLIC_TDLS_DISCOVERY_RSP = 14,
- ACT_PUBLIC_LOCATION_TRACK = 15,
- ACT_PUBLIC_MAX
-};
-
-#define OUI_MICROSOFT 0x0050f2 /* Microsoft (also used in Wi-Fi specs)
- * 00:50:F2 */
-#define WME_OUI_TYPE 2
-#define WME_OUI_SUBTYPE_INFORMATION_ELEMENT 0
-#define WME_OUI_SUBTYPE_PARAMETER_ELEMENT 1
-#define WME_OUI_SUBTYPE_TSPEC_ELEMENT 2
-#define WME_VERSION 1
-
-#define WME_ACTION_CODE_SETUP_REQUEST 0
-#define WME_ACTION_CODE_SETUP_RESPONSE 1
-#define WME_ACTION_CODE_TEARDOWN 2
-
-#define WME_SETUP_RESPONSE_STATUS_ADMISSION_ACCEPTED 0
-#define WME_SETUP_RESPONSE_STATUS_INVALID_PARAMETERS 1
-#define WME_SETUP_RESPONSE_STATUS_REFUSED 3
-
-#define WME_TSPEC_DIRECTION_UPLINK 0
-#define WME_TSPEC_DIRECTION_DOWNLINK 1
-#define WME_TSPEC_DIRECTION_BI_DIRECTIONAL 3
-
-#define OUI_BROADCOM 0x00904c /* Broadcom (Epigram) */
-
-#define VENDOR_HT_CAPAB_OUI_TYPE 0x33 /* 00-90-4c:0x33 */
-
-/**
- * enum rtw_ieee80211_channel_flags - channel flags
- *
- * Channel flags set by the regulatory control code.
- *
- * @RTW_IEEE80211_CHAN_DISABLED: This channel is disabled.
- * @RTW_IEEE80211_CHAN_PASSIVE_SCAN: Only passive scanning is permitted
- * on this channel.
- * @RTW_IEEE80211_CHAN_NO_IBSS: IBSS is not allowed on this channel.
- * @RTW_IEEE80211_CHAN_RADAR: Radar detection is required on this channel.
- * @RTW_IEEE80211_CHAN_NO_HT40PLUS: extension channel above this channel
- * is not permitted.
- * @RTW_IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel
- * is not permitted.
- */
-enum rtw_ieee80211_channel_flags {
- RTW_IEEE80211_CHAN_DISABLED = 1<<0,
- RTW_IEEE80211_CHAN_PASSIVE_SCAN = 1<<1,
- RTW_IEEE80211_CHAN_NO_IBSS = 1<<2,
- RTW_IEEE80211_CHAN_RADAR = 1<<3,
- RTW_IEEE80211_CHAN_NO_HT40PLUS = 1<<4,
- RTW_IEEE80211_CHAN_NO_HT40MINUS = 1<<5,
-};
-
-#define RTW_IEEE80211_CHAN_NO_HT40 \
- (RTW_IEEE80211_CHAN_NO_HT40PLUS | RTW_IEEE80211_CHAN_NO_HT40MINUS)
-
-/* Represent channel details, subset of ieee80211_channel */
-struct rtw_ieee80211_channel {
- u16 hw_value;
- u32 flags;
-};
-
-#define CHAN_FMT \
- "hw_value:%u, " \
- "flags:0x%08x" \
-
-#define CHAN_ARG(channel) \
- (channel)->hw_value \
- , (channel)->flags \
-
-/* Parsed Information Elements */
-struct rtw_ieee802_11_elems {
- u8 *ssid;
- u8 ssid_len;
- u8 *supp_rates;
- u8 supp_rates_len;
- u8 *fh_params;
- u8 fh_params_len;
- u8 *ds_params;
- u8 ds_params_len;
- u8 *cf_params;
- u8 cf_params_len;
- u8 *tim;
- u8 tim_len;
- u8 *ibss_params;
- u8 ibss_params_len;
- u8 *challenge;
- u8 challenge_len;
- u8 *erp_info;
- u8 erp_info_len;
- u8 *ext_supp_rates;
- u8 ext_supp_rates_len;
- u8 *wpa_ie;
- u8 wpa_ie_len;
- u8 *rsn_ie;
- u8 rsn_ie_len;
- u8 *wme;
- u8 wme_len;
- u8 *wme_tspec;
- u8 wme_tspec_len;
- u8 *wps_ie;
- u8 wps_ie_len;
- u8 *power_cap;
- u8 power_cap_len;
- u8 *supp_channels;
- u8 supp_channels_len;
- u8 *mdie;
- u8 mdie_len;
- u8 *ftie;
- u8 ftie_len;
- u8 *timeout_int;
- u8 timeout_int_len;
- u8 *ht_capabilities;
- u8 ht_capabilities_len;
- u8 *ht_operation;
- u8 ht_operation_len;
- u8 *vendor_ht_cap;
- u8 vendor_ht_cap_len;
-};
-
-enum parse_res {
- ParseOK = 0,
- ParseUnknown = 1,
- ParseFailed = -1
-};
-
-enum parse_res rtw_ieee802_11_parse_elems(u8 *start, uint len,
- struct rtw_ieee802_11_elems *elems,
- int show_errors);
-
-u8 *rtw_set_fixed_ie(unsigned char *pbuf, unsigned int len,
- unsigned char *source, unsigned int *frlen);
-u8 *rtw_set_ie(u8 *pbuf, int index, uint len, u8 *source, uint *frlen);
-u8 *rtw_get_ie(u8 *pbuf, int index, int *len, int limit);
-
-void rtw_set_supported_rate(u8 *SupportedRates, uint mode);
-
-unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit);
-unsigned char *rtw_get_wpa2_ie(unsigned char *pie, int *rsn_ie_len, int limit);
-int rtw_get_wpa_cipher_suite(u8 *s);
-int rtw_get_wpa2_cipher_suite(u8 *s);
-int rtw_get_wapi_ie(u8 *in_ie, uint in_len, u8 *wapi_ie, u16 *wapi_len);
-int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher,
- int *pairwise_cipher, int *is_8021x);
-int rtw_parse_wpa2_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher,
- int *pairwise_cipher, int *is_8021x);
-
-int rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len,
- u8 *wpa_ie, u16 *wpa_len);
-
-u8 rtw_is_wps_ie(u8 *ie_ptr, uint *wps_ielen);
-u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen);
-u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id,
- u8 *buf_attr, u32 *len_attr);
-u8 *rtw_get_wps_attr_content(u8 *wps_ie, uint wps_ielen, u16 target_attr_id,
- u8 *buf_content, uint *len_content);
-
-/**
- * for_each_ie - iterate over continuous IEs
- * @ie: iterator; points at the current information element
- * @buf: buffer that holds the information elements
- * @buf_len: length of @buf in bytes
- */
-#define for_each_ie(ie, buf, buf_len) \
- for (ie = (void *)buf; (((u8 *)ie) - ((u8 *)buf) + 1) < buf_len; \
- ie = (void *)(((u8 *)ie) + *(((u8 *)ie)+1) + 2))
-
-u8 *rtw_get_p2p_ie(u8 *in_ie, int in_len, u8 *p2p_ie, uint *p2p_ielen);
-u8 *rtw_get_p2p_attr(u8 *p2p_ie, uint p2p_ielen, u8 target_attr_id,
- u8 *buf_attr, u32 *len_attr);
-u8 *rtw_get_p2p_attr_content(u8 *p2p_ie, uint p2p_ielen, u8 target_attr_id,
- u8 *buf_content, uint *len_content);
-u32 rtw_set_p2p_attr_content(u8 *pbuf, u8 attr_id, u16 attr_len,
- u8 *pdata_attr);
-void rtw_wlan_bssid_ex_remove_p2p_attr(struct wlan_bssid_ex *bss_ex,
- u8 attr_id);
-uint rtw_get_rateset_len(u8 *rateset);
-
-struct registry_priv;
-int rtw_generate_ie(struct registry_priv *pregistrypriv);
-
-int rtw_get_bit_value_from_ieee_value(u8 val);
-
-bool rtw_is_cckrates_included(u8 *rate);
-
-bool rtw_is_cckratesonly_included(u8 *rate);
-
-int rtw_check_network_type(unsigned char *rate, int ratelen, int channel);
-
-void rtw_get_bcn_info(struct wlan_network *pnetwork);
-
-void rtw_macaddr_cfg(u8 *mac_addr);
-
-u16 rtw_mcs_rate(u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40, unsigned char *MCS_rate);
-
-#endif /* IEEE80211_H */
diff --git a/drivers/staging/r8188eu/include/odm.h b/drivers/staging/r8188eu/include/odm.h
deleted file mode 100644
index 8cea166b7b73..000000000000
--- a/drivers/staging/r8188eu/include/odm.h
+++ /dev/null
@@ -1,416 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __HALDMOUTSRC_H__
-#define __HALDMOUTSRC_H__
-
-struct rtw_dig {
- u8 PreIGValue;
- u8 CurIGValue;
- u8 BackupIGValue;
-
- u8 rx_gain_range_max;
- u8 rx_gain_range_min;
-
- u8 CurCCK_CCAThres;
-
- u8 LargeFAHit;
- u8 ForbiddenIGI;
- u32 Recover_cnt;
-
- u8 DIG_Dynamic_MIN_0;
- bool bMediaConnect_0;
-
- u32 AntDiv_RSSI_max;
- u32 RSSI_max;
-};
-
-struct rtl_ps {
- u8 pre_rf_state;
- u8 cur_rf_state;
- u8 initialize;
- u32 reg_874;
- u32 reg_c70;
- u32 reg_85c;
- u32 reg_a74;
-
-};
-
-struct false_alarm_stats {
- u32 Cnt_Parity_Fail;
- u32 Cnt_Rate_Illegal;
- u32 Cnt_Crc8_fail;
- u32 Cnt_Mcs_fail;
- u32 Cnt_Ofdm_fail;
- u32 Cnt_Cck_fail;
- u32 Cnt_all;
- u32 Cnt_Fast_Fsync;
- u32 Cnt_SB_Search_fail;
- u32 Cnt_OFDM_CCA;
- u32 Cnt_CCK_CCA;
- u32 Cnt_CCA_all;
- u32 Cnt_BW_USC; /* Gary */
- u32 Cnt_BW_LSC; /* Gary */
-};
-
-#define ODM_ASSOCIATE_ENTRY_NUM 32 /* Max size of AsocEntry[]. */
-
-struct sw_ant_switch {
- u8 CurAntenna;
- u8 SWAS_NoLink_State; /* Before link Antenna Switch check */
- u8 RxIdleAnt;
-};
-
-struct edca_turbo {
- bool bCurrentTurboEDCA;
- bool bIsCurRDLState;
- u32 prv_traffic_idx; /* edca turbo */
-};
-
-struct odm_rate_adapt {
- u8 HighRSSIThresh; /* if RSSI > HighRSSIThresh => RATRState is DM_RATR_STA_HIGH */
- u8 LowRSSIThresh; /* if RSSI <= LowRSSIThresh => RATRState is DM_RATR_STA_LOW */
- u8 RATRState; /* Current RSSI level, DM_RATR_STA_HIGH/DM_RATR_STA_MIDDLE/DM_RATR_STA_LOW */
- u32 LastRATR; /* RATR Register Content */
-};
-
-#define IQK_MAC_REG_NUM 4
-#define IQK_ADDA_REG_NUM 16
-#define IQK_BB_REG_NUM 9
-#define HP_THERMAL_NUM 8
-
-#define AVG_THERMAL_NUM 8
-
-struct odm_phy_dbg_info {
- /* ODM Write,debug info */
- s8 RxSNRdB[MAX_PATH_NUM_92CS];
- u64 NumQryPhyStatus;
- /* Others */
- s32 RxEVM[MAX_PATH_NUM_92CS];
-};
-
-struct odm_per_pkt_info {
- s8 Rate;
- u8 StationID;
- bool bPacketMatchBSSID;
- bool bPacketToSelf;
- bool bPacketBeacon;
-};
-
-/* 2011/10/20 MH Define Common info enum for all team. */
-
-enum odm_common_info_def {
- /* Fixed value: */
-
- /* HOOK BEFORE REG INIT----------- */
- ODM_CMNINFO_MP_TEST_CHIP,
- /* HOOK BEFORE REG INIT----------- */
-
-/* CALL BY VALUE------------- */
- ODM_CMNINFO_RF_ANTENNA_TYPE, /* u8 */
-/* CALL BY VALUE-------------*/
-};
-
-enum odm_ability_def {
- /* BB ODM section BIT 0-15 */
- ODM_BB_RSSI_MONITOR = BIT(4),
- ODM_BB_ANT_DIV = BIT(6),
- ODM_BB_PWR_TRA = BIT(8),
-};
-
-#define ODM_ITRF_USB 0x2
-#define ODM_CE 0x04
-
-/* ODM_CMNINFO_WM_MODE */
-enum odm_wireless_mode {
- ODM_WM_UNKNOW = 0x0,
- ODM_WM_B = BIT(0),
- ODM_WM_G = BIT(1),
- ODM_WM_N24G = BIT(3),
- ODM_WM_AUTO = BIT(5),
-};
-
-struct odm_ra_info {
- u8 RateID;
- u32 RateMask;
- u32 RAUseRate;
- u8 RateSGI;
- u8 RssiStaRA;
- u8 PreRssiStaRA;
- u8 SGIEnable;
- u8 DecisionRate;
- u8 PreRate;
- u8 HighestRate;
- u8 LowestRate;
- u32 NscUp;
- u32 NscDown;
- u16 RTY[5];
- u32 TOTAL;
- u16 DROP;
- u8 Active;
- u16 RptTime;
- u8 RAWaitingCounter;
- u8 RAPendingCounter;
- u8 PTActive; /* on or off */
- u8 PTTryState; /* 0 trying state, 1 for decision state */
- u8 PTStage; /* 0~6 */
- u8 PTStopCount; /* Stop PT counter */
- u8 PTPreRate; /* if rate change do PT */
- u8 PTPreRssi; /* if RSSI change 5% do PT */
-	u8 PTModeSS;	/* decide which rate should do PT */
- u8 RAstage; /* StageRA, decide how many times RA will be done
- * between PT */
- u8 PTSmoothFactor;
-};
-
-struct odm_rf_cal {
- /* for tx power tracking */
- u32 RegA24; /* for TempCCK */
- s32 RegE94;
- s32 RegE9C;
- s32 RegEB4;
- s32 RegEBC;
-
- u8 TxPowerTrackControl; /* for mp mode, turn off txpwrtracking
- * as default */
- u8 TM_Trigger;
- u8 InternalPA5G[2]; /* pathA / pathB */
-
- u8 ThermalMeter[2]; /* ThermalMeter, index 0 for RFIC0,
- * and 1 for RFIC1 */
- u8 ThermalValue;
- u8 ThermalValue_LCK;
- u8 ThermalValue_IQK;
- u8 ThermalValue_DPK;
- u8 ThermalValue_AVG[AVG_THERMAL_NUM];
- u8 ThermalValue_AVG_index;
- u8 ThermalValue_RxGain;
- u8 ThermalValue_Crystal;
- u8 ThermalValue_DPKstore;
- u8 ThermalValue_DPKtrack;
- bool TxPowerTrackingInProgress;
- bool bDPKenable;
-
- bool bReloadtxpowerindex;
- u8 bRfPiEnable;
-
- u8 CCK_index;
- u8 OFDM_index;
- bool bDoneTxpower;
-
- u8 ThermalValue_HP[HP_THERMAL_NUM];
- u8 ThermalValue_HP_index;
-
- u8 Delta_IQK;
- u8 Delta_LCK;
-
- /* for IQK */
- u32 RegC04;
- u32 Reg874;
- u32 RegC08;
- u32 RegB68;
- u32 RegB6C;
- u32 Reg870;
- u32 Reg860;
- u32 Reg864;
-
- bool bIQKInitialized;
- bool bAntennaDetected;
- u32 ADDA_backup[IQK_ADDA_REG_NUM];
- u32 IQK_MAC_backup[IQK_MAC_REG_NUM];
- u32 IQK_BB_backup_recover[9];
- u32 IQK_BB_backup[IQK_BB_REG_NUM];
-
- /* for APK */
- u32 APKoutput[2][2]; /* path A/B; output1_1a/output1_2a */
- u8 bAPKdone;
- u8 bAPKThermalMeterIgnore;
- u8 bDPdone;
- u8 bDPPathAOK;
- u8 bDPPathBOK;
-};
-
-/* ODM Dynamic common info value definition */
-
-struct fast_ant_train {
- u8 antsel_rx_keep_0;
- u8 antsel_rx_keep_1;
- u8 antsel_rx_keep_2;
- u8 antsel_a[ODM_ASSOCIATE_ENTRY_NUM];
- u8 antsel_b[ODM_ASSOCIATE_ENTRY_NUM];
- u8 antsel_c[ODM_ASSOCIATE_ENTRY_NUM];
- u32 MainAnt_Sum[ODM_ASSOCIATE_ENTRY_NUM];
- u32 AuxAnt_Sum[ODM_ASSOCIATE_ENTRY_NUM];
- u32 MainAnt_Cnt[ODM_ASSOCIATE_ENTRY_NUM];
- u32 AuxAnt_Cnt[ODM_ASSOCIATE_ENTRY_NUM];
- u8 RxIdleAnt;
- bool bBecomeLinked;
-};
-
-enum ant_div_type {
- NO_ANTDIV = 0xFF,
- CG_TRX_HW_ANTDIV = 0x01,
- CGCS_RX_HW_ANTDIV = 0x02,
- FIXED_HW_ANTDIV = 0x03,
- CG_TRX_SMART_ANTDIV = 0x04,
-};
-
-/* Copied from the SD4 defined structure. We use it to support PHY DM integration. */
-struct odm_dm_struct {
- struct adapter *Adapter; /* For CE/NIC team */
-
-/* ODM HANDLE, DRIVER NEEDS NOT TO HOOK------ */
- bool bCckHighPower;
- u8 RFPathRxEnable; /* ODM_CMNINFO_RFPATH_ENABLE */
- u8 ControlChannel;
-/* ODM HANDLE, DRIVER NEEDS NOT TO HOOK------ */
-
-/* 1 COMMON INFORMATION */
- /* Init Value */
-/* HOOK BEFORE REG INIT----------- */
-	/* ODM Support Ability DIG/RATR/TX_PWR_TRACK/... = 1/2/3/... */
- u32 SupportAbility;
-
- u32 BK_SupportAbility;
- u8 AntDivType;
-/* HOOK BEFORE REG INIT----------- */
-
- /* Dynamic Value */
-/* POINTER REFERENCE----------- */
- /* Wireless mode B/G/A/N = BIT(0)/BIT(1)/BIT(2)/BIT(3) */
- u8 *pWirelessMode; /* ODM_WIRELESS_MODE_E */
- /* Secondary channel offset don't_care/below/above = 0/1/2 */
- u8 *pSecChOffset;
- /* BW info 20M/40M/80M = 0/1/2 */
- enum ht_channel_width *pBandWidth;
- /* Central channel location Ch1/Ch2/.... */
- u8 *pChannel; /* central channel number */
-
- /* Common info for Status */
- bool *pbScanInProcess;
- bool *pbPowerSaving;
-/* POINTER REFERENCE----------- */
- /* */
-/* CALL BY VALUE------------- */
- bool bLinked;
- u8 RSSI_Min;
- bool bIsMPChip;
- bool bOneEntryOnly;
-/* CALL BY VALUE------------- */
-
- /* 2 Define STA info. */
- /* _ODM_STA_INFO */
- /* For MP, we need to reduce one array pointer for default port.?? */
- struct sta_info *pODM_StaInfo[ODM_ASSOCIATE_ENTRY_NUM];
-
- u16 CurrminRptTime;
- struct odm_ra_info RAInfo[ODM_ASSOCIATE_ENTRY_NUM]; /* Use MacID as
- * array index. STA MacID=0,
- * VWiFi Client MacID={1, ODM_ASSOCIATE_ENTRY_NUM-1} */
-
- /* Latest packet phy info (ODM write) */
- struct odm_phy_dbg_info PhyDbgInfo;
-
- /* ODM Structure */
- struct fast_ant_train DM_FatTable;
- struct rtw_dig DM_DigTable;
- struct rtl_ps DM_PSTable;
- struct false_alarm_stats FalseAlmCnt;
- struct sw_ant_switch DM_SWAT_Table;
-
- struct edca_turbo DM_EDCA_Table;
-
- /* PSD */
- bool bDMInitialGainEnable;
-
- struct odm_rate_adapt RateAdaptive;
-
- struct odm_rf_cal RFCalibrateInfo;
-
- /* TX power tracking */
- u8 BbSwingIdxOfdm;
- u8 BbSwingIdxOfdmCurrent;
- u8 BbSwingIdxOfdmBase;
- bool BbSwingFlagOfdm;
- u8 BbSwingIdxCck;
- u8 BbSwingIdxCckCurrent;
- u8 BbSwingIdxCckBase;
- bool BbSwingFlagCck;
-};
-
-enum odm_bb_config_type {
- CONFIG_BB_PHY_REG,
- CONFIG_BB_AGC_TAB,
- CONFIG_BB_AGC_TAB_2G,
- CONFIG_BB_PHY_REG_PG,
-};
-
-#define DM_DIG_MAX_NIC 0x4e
-#define DM_DIG_MIN_NIC 0x1e /* 0x22/0x1c */
-
-#define DM_DIG_MAX_AP 0x32
-
-/* vivi 92c&92d has different definition, 20110504 */
-/* this is for 92c */
-#define DM_DIG_FA_TH0 0x200/* 0x20 */
-#define DM_DIG_FA_TH1 0x300/* 0x100 */
-#define DM_DIG_FA_TH2 0x400/* 0x200 */
-
-/* 3=========================================================== */
-/* 3 Rate Adaptive */
-/* 3=========================================================== */
-#define DM_RATR_STA_INIT 0
-#define DM_RATR_STA_HIGH 1
-#define DM_RATR_STA_MIDDLE 2
-#define DM_RATR_STA_LOW 3
-
-/* 3=========================================================== */
-/* 3 BB Power Save */
-/* 3=========================================================== */
-
-enum dm_rf {
- RF_Save = 0,
- RF_Normal = 1,
- RF_MAX = 2,
-};
-
-/* 3=========================================================== */
-/* 3 Antenna Diversity */
-/* 3=========================================================== */
-enum dm_swas {
- Antenna_A = 1,
- Antenna_B = 2,
- Antenna_MAX = 3,
-};
-
-/* Extern Global Variables. */
-#define OFDM_TABLE_SIZE_92D 43
-#define CCK_TABLE_SIZE 33
-
-extern u32 OFDMSwingTable[OFDM_TABLE_SIZE_92D];
-extern u8 cck_swing_table[CCK_TABLE_SIZE][8];
-
-/* check Sta pointer valid or not */
-#define IS_STA_VALID(pSta) (pSta)
-
-void ODM_Write_DIG(struct odm_dm_struct *pDM_Odm, u8 CurrentIGI);
-void ODM_Write_CCK_CCA_Thres(struct odm_dm_struct *pDM_Odm, u8 CurCCK_CCAThres);
-
-void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal);
-
-void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm);
-
-bool ODM_RAStateCheck(struct odm_dm_struct *pDM_Odm, s32 RSSI,
- bool bForceUpdate, u8 *pRATRState);
-
-u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid,
- u32 ra_mask, u8 rssi_level);
-
-void ODM_DMInit(struct odm_dm_struct *pDM_Odm);
-
-void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm);
-
-void ODM_CmnInfoInit(struct odm_dm_struct *pDM_Odm,
- enum odm_common_info_def CmnInfo, u32 Value);
-
-#endif
diff --git a/drivers/staging/r8188eu/include/odm_HWConfig.h b/drivers/staging/r8188eu/include/odm_HWConfig.h
deleted file mode 100644
index 3f7185780e87..000000000000
--- a/drivers/staging/r8188eu/include/odm_HWConfig.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __HALHWOUTSRC_H__
-#define __HALHWOUTSRC_H__
-
-/* CCK Rates, TxHT = 0 */
-#define DESC92C_RATE1M 0x00
-#define DESC92C_RATE11M 0x03
-
-/* MCS Rates, TxHT = 1 */
-#define DESC92C_RATEMCS8 0x14
-#define DESC92C_RATEMCS15 0x1b
-
-/* structure and define */
-
-struct phy_rx_agc_info {
- #ifdef __LITTLE_ENDIAN
- u8 gain:7, trsw:1;
- #else
- u8 trsw:1, gain:7;
- #endif
-};
-
-struct phy_status_rpt {
- struct phy_rx_agc_info path_agc[3];
- u8 ch_corr[2];
- u8 cck_sig_qual_ofdm_pwdb_all;
- u8 cck_agc_rpt_ofdm_cfosho_a;
- u8 cck_rpt_b_ofdm_cfosho_b;
- u8 rsvd_1;/* ch_corr_msb; */
- u8 noise_power_db_msb;
- u8 path_cfotail[2];
- u8 pcts_mask[2];
- s8 stream_rxevm[2];
- u8 path_rxsnr[3];
- u8 noise_power_db_lsb;
- u8 rsvd_2[3];
- u8 stream_csi[2];
- u8 stream_target_csi[2];
- s8 sig_evm;
- u8 rsvd_3;
-
-#ifdef __LITTLE_ENDIAN
- u8 antsel_rx_keep_2:1; /* ex_intf_flg:1; */
- u8 sgi_en:1;
- u8 rxsc:2;
- u8 idle_long:1;
- u8 r_ant_train_en:1;
- u8 ant_sel_b:1;
- u8 ant_sel:1;
-#else /* _BIG_ENDIAN_ */
- u8 ant_sel:1;
- u8 ant_sel_b:1;
- u8 r_ant_train_en:1;
- u8 idle_long:1;
- u8 rxsc:2;
- u8 sgi_en:1;
- u8 antsel_rx_keep_2:1; /* ex_intf_flg:1; */
-#endif
-};
-
-void ODM_PhyStatusQuery(struct odm_dm_struct *pDM_Odm,
- struct phy_info *pPhyInfo,
- u8 *pPhyStatus,
- struct odm_per_pkt_info *pPktinfo,
- struct adapter *adapt);
-
-#endif
diff --git a/drivers/staging/r8188eu/include/odm_RTL8188E.h b/drivers/staging/r8188eu/include/odm_RTL8188E.h
deleted file mode 100644
index 4f16af248591..000000000000
--- a/drivers/staging/r8188eu/include/odm_RTL8188E.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __ODM_RTL8188E_H__
-#define __ODM_RTL8188E_H__
-
-#define MAIN_ANT 0
-#define AUX_ANT 1
-#define MAIN_ANT_CG_TRX 1
-#define AUX_ANT_CG_TRX 0
-#define MAIN_ANT_CGCS_RX 0
-#define AUX_ANT_CGCS_RX 1
-
-#define SET_TX_DESC_ANTSEL_A_88E(__ptxdesc, __value) \
- le32p_replace_bits((__le32 *)(__ptxdesc + 8), __value, BIT(24))
-#define SET_TX_DESC_ANTSEL_B_88E(__ptxdesc, __value) \
- le32p_replace_bits((__le32 *)(__ptxdesc + 8), __value, BIT(25))
-#define SET_TX_DESC_ANTSEL_C_88E(__ptxdesc, __value) \
- le32p_replace_bits((__le32 *)(__ptxdesc + 28), __value, BIT(29))
-
-void ODM_AntennaDiversityInit_88E(struct odm_dm_struct *pDM_Odm);
-
-void ODM_AntennaDiversity_88E(struct odm_dm_struct *pDM_Odm);
-
-void ODM_SetTxAntByTxInfo_88E(struct odm_dm_struct *pDM_Odm, u8 *pDesc,
- u8 macId);
-
-void ODM_UpdateRxIdleAnt_88E(struct odm_dm_struct *pDM_Odm, u8 Ant);
-
-void ODM_AntselStatistics_88E(struct odm_dm_struct *pDM_Odm, u8 antsel_tr_mux,
- u32 MacId, u8 RxPWDBAll);
-
-void odm_FastAntTraining(struct odm_dm_struct *pDM_Odm);
-
-#endif
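
The SET_TX_DESC_ANTSEL_*_88E macros above patch single antenna-select bits into the
little-endian TX descriptor in place; le32p_replace_bits() performs the masked
read-modify-write on the 32-bit word at the given offset. A small usage sketch,
assuming ptxdesc points at a TX descriptor buffer of at least 32 bytes (the buffer
name is illustrative):

	u8 *ptxdesc = txdesc_buf;		/* hypothetical descriptor buffer */

	SET_TX_DESC_ANTSEL_A_88E(ptxdesc, 1);	/* bit 24 of descriptor dword 2 */
	SET_TX_DESC_ANTSEL_B_88E(ptxdesc, 0);	/* bit 25 of descriptor dword 2 */
	SET_TX_DESC_ANTSEL_C_88E(ptxdesc, 1);	/* bit 29 of descriptor dword 7 */
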
diff --git a/drivers/staging/r8188eu/include/odm_RegDefine11N.h b/drivers/staging/r8188eu/include/odm_RegDefine11N.h
deleted file mode 100644
index 82a602b39cc7..000000000000
--- a/drivers/staging/r8188eu/include/odm_RegDefine11N.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __ODM_REGDEFINE11N_H__
-#define __ODM_REGDEFINE11N_H__
-
-/* 2 BB REG LIST */
-/* PAGE 8 */
-#define ODM_REG_TX_ANT_CTRL_11N 0x80C
-#define ODM_REG_RX_DEFUALT_A_11N 0x858
-#define ODM_REG_ANTSEL_CTRL_11N 0x860
-#define ODM_REG_RX_ANT_CTRL_11N 0x864
-#define ODM_REG_PIN_CTRL_11N 0x870
-#define ODM_REG_SC_CNT_11N 0x8C4
-/* PAGE 9 */
-#define ODM_REG_ANT_MAPPING1_11N 0x914
-/* PAGE A */
-#define ODM_REG_CCK_ANTDIV_PARA1_11N 0xA00
-#define ODM_REG_CCK_CCA_11N 0xA0A
-#define ODM_REG_CCK_ANTDIV_PARA2_11N 0xA0C
-#define ODM_REG_CCK_FA_RST_11N 0xA2C
-#define ODM_REG_CCK_FA_MSB_11N 0xA58
-#define ODM_REG_CCK_FA_LSB_11N 0xA5C
-#define ODM_REG_CCK_CCA_CNT_11N 0xA60
-#define ODM_REG_BB_PWR_SAV4_11N 0xA74
-/* PAGE B */
-#define ODM_REG_LNA_SWITCH_11N 0xB2C
-/* PAGE C */
-#define ODM_REG_OFDM_FA_HOLDC_11N 0xC00
-#define ODM_REG_OFDM_FA_RSTC_11N 0xC0C
-#define ODM_REG_IGI_A_11N 0xC50
-#define ODM_REG_ANTDIV_PARA1_11N 0xCA4
-#define ODM_REG_OFDM_FA_TYPE1_11N 0xCF0
-/* PAGE D */
-#define ODM_REG_OFDM_FA_RSTD_11N 0xD00
-#define ODM_REG_OFDM_FA_TYPE2_11N 0xDA0
-#define ODM_REG_OFDM_FA_TYPE3_11N 0xDA4
-#define ODM_REG_OFDM_FA_TYPE4_11N 0xDA8
-
-/* 2 MAC REG LIST */
-#define ODM_REG_ANTSEL_PIN_11N 0x4C
-#define ODM_REG_RESP_TX_11N 0x6D8
-
-/* DIG Related */
-#define ODM_BIT_IGI_11N 0x0000007F
-
-#endif
diff --git a/drivers/staging/r8188eu/include/osdep_intf.h b/drivers/staging/r8188eu/include/osdep_intf.h
deleted file mode 100644
index 457fb3852a19..000000000000
--- a/drivers/staging/r8188eu/include/osdep_intf.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __OSDEP_INTF_H_
-#define __OSDEP_INTF_H_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-int netdev_open(struct net_device *pnetdev);
-int netdev_close(struct net_device *pnetdev);
-
-u8 rtw_init_drv_sw(struct adapter *padapter);
-void rtw_free_drv_sw(struct adapter *padapter);
-void rtw_reset_drv_sw(struct adapter *padapter);
-
-int rtw_start_drv_threads(struct adapter *padapter);
-void rtw_stop_drv_threads (struct adapter *padapter);
-void rtw_cancel_all_timer(struct adapter *padapter);
-
-int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname);
-struct net_device *rtw_init_netdev(struct adapter *padapter);
-u16 rtw_recv_select_queue(struct sk_buff *skb);
-
-void rtw_ips_dev_unload(struct adapter *padapter);
-
-int rtw_ips_pwr_up(struct adapter *padapter);
-void rtw_ips_pwr_down(struct adapter *padapter);
-
-#endif /* _OSDEP_INTF_H_ */
diff --git a/drivers/staging/r8188eu/include/osdep_service.h b/drivers/staging/r8188eu/include/osdep_service.h
deleted file mode 100644
index f8ed04f32cae..000000000000
--- a/drivers/staging/r8188eu/include/osdep_service.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __OSDEP_SERVICE_H_
-#define __OSDEP_SERVICE_H_
-
-#include <linux/sched/signal.h>
-
-#define _FAIL 0
-#define _SUCCESS 1
-#define RTW_RX_HANDLED 2
-
-#include <linux/spinlock.h>
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/kref.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/circ_buf.h>
-#include <linux/uaccess.h>
-#include <asm/byteorder.h>
-#include <asm/atomic.h>
-#include <linux/io.h>
-#include <linux/semaphore.h>
-#include <linux/sem.h>
-#include <linux/sched.h>
-#include <linux/etherdevice.h>
-#include <linux/wireless.h>
-#include <net/iw_handler.h>
-#include <linux/if_arp.h>
-#include <linux/rtnetlink.h>
-#include <linux/delay.h>
-#include <linux/proc_fs.h> /* Necessary because we use the proc fs */
-#include <linux/interrupt.h> /* for struct tasklet_struct */
-#include <linux/ip.h>
-#include <linux/kthread.h>
-#include <linux/vmalloc.h>
-
-#include <linux/usb.h>
-#include <linux/usb/ch9.h>
-
-struct __queue {
- struct list_head queue;
- spinlock_t lock;
-};
-
-static inline struct list_head *get_list_head(struct __queue *queue)
-{
- return (&(queue->queue));
-}
-
-static inline void _set_timer(struct timer_list *ptimer, u32 delay_time)
-{
- mod_timer(ptimer, jiffies + msecs_to_jiffies(delay_time));
-}
-
-static inline int rtw_netif_queue_stopped(struct net_device *pnetdev)
-{
- return netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 0)) &&
- netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 1)) &&
- netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 2)) &&
- netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 3));
-}
-
-int RTW_STATUS_CODE(int error_code);
-
-void *rtw_malloc2d(int h, int w, int size);
-
-#define rtw_init_queue(q) \
- do { \
- INIT_LIST_HEAD(&((q)->queue)); \
- spin_lock_init(&((q)->lock)); \
- } while (0)
-
-static inline unsigned char _cancel_timer_ex(struct timer_list *ptimer)
-{
- return del_timer_sync(ptimer);
-}
-
-static inline void flush_signals_thread(void)
-{
-	if (signal_pending(current))
- flush_signals(current);
-}
-
-struct rtw_netdev_priv_indicator {
- void *priv;
- u32 sizeof_priv;
-};
-struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv,
- void *old_priv);
-struct net_device *rtw_alloc_etherdev(int sizeof_priv);
-
-#define rtw_netdev_priv(netdev) \
- (((struct rtw_netdev_priv_indicator *)netdev_priv(netdev))->priv)
-void rtw_free_netdev(struct net_device *netdev);
-
-#define NDEV_FMT "%s"
-#define NDEV_ARG(ndev) ndev->name
-#define ADPT_FMT "%s"
-#define ADPT_ARG(adapter) adapter->pnetdev->name
-#define FUNC_NDEV_FMT "%s(%s)"
-#define FUNC_NDEV_ARG(ndev) __func__, ndev->name
-#define FUNC_ADPT_FMT "%s(%s)"
-#define FUNC_ADPT_ARG(adapter) __func__, adapter->pnetdev->name
-
-#define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)), (sig), 1)
-
-/* Macros for handling unaligned memory accesses */
-
-#define RTW_GET_BE16(a) ((u16) (((a)[0] << 8) | (a)[1]))
-#define RTW_PUT_BE16(a, val) \
- do { \
- (a)[0] = ((u16) (val)) >> 8; \
- (a)[1] = ((u16) (val)) & 0xff; \
- } while (0)
-
-#define RTW_PUT_LE16(a, val) \
- do { \
- (a)[1] = ((u16) (val)) >> 8; \
- (a)[0] = ((u16) (val)) & 0xff; \
- } while (0)
-
-#define RTW_GET_BE24(a) ((((u32) (a)[0]) << 16) | (((u32) (a)[1]) << 8) | \
- ((u32) (a)[2]))
-
-#define RTW_PUT_BE32(a, val) \
- do { \
- (a)[0] = (u8) ((((u32) (val)) >> 24) & 0xff); \
- (a)[1] = (u8) ((((u32) (val)) >> 16) & 0xff); \
- (a)[2] = (u8) ((((u32) (val)) >> 8) & 0xff); \
- (a)[3] = (u8) (((u32) (val)) & 0xff); \
- } while (0)
-
-void rtw_buf_update(u8 **buf, u32 *buf_len, u8 *src, u32 src_len);
-
-struct rtw_cbuf {
- u32 write;
- u32 read;
- u32 size;
- void *bufs[];
-};
-
-bool rtw_cbuf_empty(struct rtw_cbuf *cbuf);
-void *rtw_cbuf_pop(struct rtw_cbuf *cbuf);
-struct rtw_cbuf *rtw_cbuf_alloc(u32 size);
-int wifirate2_ratetbl_inx(unsigned char rate);
-
-#endif
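
The RTW_GET/PUT_* macros above open-code unaligned big/little-endian accesses one byte
at a time; the kernel's generic unaligned helpers produce the same byte order. A short
equivalence sketch (the buffer name is illustrative only):

	#include <asm/unaligned.h>

	u8 buf[4];
	u16 v;

	RTW_PUT_BE16(buf, 0x1234);		/* buf[0] = 0x12, buf[1] = 0x34 */
	put_unaligned_be16(0x1234, buf);	/* same result */

	v = RTW_GET_BE16(buf);			/* v == 0x1234 */
	v = get_unaligned_be16(buf);		/* same result */
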
diff --git a/drivers/staging/r8188eu/include/rtl8188e_cmd.h b/drivers/staging/r8188eu/include/rtl8188e_cmd.h
deleted file mode 100644
index c785cf8ed683..000000000000
--- a/drivers/staging/r8188eu/include/rtl8188e_cmd.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTL8188E_CMD_H__
-#define __RTL8188E_CMD_H__
-
-enum RTL8188E_H2C_CMD_ID {
- /* Class Common */
- H2C_COM_RSVD_PAGE = 0x00,
- H2C_COM_MEDIA_STATUS_RPT = 0x01,
- H2C_COM_SCAN = 0x02,
- H2C_COM_KEEP_ALIVE = 0x03,
- H2C_COM_DISCNT_DECISION = 0x04,
- H2C_COM_INIT_OFFLOAD = 0x06,
- H2C_COM_REMOTE_WAKE_CTL = 0x07,
- H2C_COM_AP_OFFLOAD = 0x08,
- H2C_COM_BCN_RSVD_PAGE = 0x09,
- H2C_COM_PROB_RSP_RSVD_PAGE = 0x0A,
-
- /* Class PS */
- H2C_PS_PWR_MODE = 0x20,
- H2C_PS_TUNE_PARA = 0x21,
- H2C_PS_TUNE_PARA_2 = 0x22,
- H2C_PS_LPS_PARA = 0x23,
- H2C_PS_P2P_OFFLOAD = 0x24,
-
- /* Class DM */
- H2C_DM_MACID_CFG = 0x40,
- H2C_DM_TXBF = 0x41,
-};
-
-struct cmd_msg_parm {
- u8 eid; /* element id */
- u8 sz; /* sz */
- u8 buf[6];
-};
-
-struct setpwrmode_parm {
- u8 Mode;/* 0:Active,1:LPS,2:WMMPS */
- u8 SmartPS_RLBM;/* LPS= 0:PS_Poll,1:PS_Poll,2:NullData,WMM= 0:PS_Poll,1:NullData */
- u8 AwakeInterval; /* unit: beacon interval */
- u8 bAllQueueUAPSD;
- u8 PwrState;/* AllON(0x0c),RFON(0x04),RFOFF(0x00) */
-};
-
-struct H2C_SS_RFOFF_PARAM {
- u8 ROFOn; /* 1: on, 0:off */
- u16 gpio_period; /* unit: 1024 us */
-} __packed;
-
-struct joinbssrpt_parm {
- u8 OpMode; /* RT_MEDIA_STATUS */
-};
-
-struct rsvdpage_loc {
- u8 LocProbeRsp;
- u8 LocPsPoll;
- u8 LocNullData;
- u8 LocQosNull;
- u8 LocBTQosNull;
-};
-
-struct P2P_PS_Offload_t {
- u8 Offload_En:1;
- u8 role:1; /* 1: Owner, 0: Client */
- u8 CTWindow_En:1;
- u8 NoA0_En:1;
- u8 NoA1_En:1;
- u8 AllStaSleep:1; /* Only valid in Owner */
- u8 discovery:1;
- u8 rsvd:1;
-};
-
-struct P2P_PS_CTWPeriod_t {
- u8 CTWPeriod; /* TU */
-};
-
-/* host message to firmware cmd */
-void rtl8188e_set_FwPwrMode_cmd(struct adapter *padapter, u8 Mode);
-void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *padapter, u8 mstatus);
-u8 rtl8188e_set_raid_cmd(struct adapter *padapter, u32 mask);
-void rtl8188e_Add_RateATid(struct adapter *padapter, u32 bitmap, u8 arg,
- u8 rssi_level);
-
-void rtl8188e_set_p2p_ps_offload_cmd(struct adapter *adapt, u8 p2p_ps_state);
-
-void CheckFwRsvdPageContent(struct adapter *adapt);
-void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, u16 mstatus_rpt);
-
-#endif/* __RTL8188E_CMD_H__ */
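
The H2C IDs and parameter structs above are packed into short host-to-firmware messages;
a power-save request, for example, fills struct setpwrmode_parm and is sent under
H2C_PS_PWR_MODE. A hedged sketch of building such a request (the field values are
illustrative and fill_h2c_cmd() is a stand-in for the driver's own H2C fill helper,
which is not part of this header):

	struct setpwrmode_parm parm = {
		.Mode		= 1,	/* LPS */
		.SmartPS_RLBM	= 2,	/* NullData */
		.AwakeInterval	= 1,	/* wake every beacon interval */
		.bAllQueueUAPSD	= 0,
		.PwrState	= 0x0c,	/* AllON */
	};

	fill_h2c_cmd(adapt, H2C_PS_PWR_MODE, sizeof(parm), (u8 *)&parm);
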
diff --git a/drivers/staging/r8188eu/include/rtl8188e_dm.h b/drivers/staging/r8188eu/include/rtl8188e_dm.h
deleted file mode 100644
index d62cdfc2db20..000000000000
--- a/drivers/staging/r8188eu/include/rtl8188e_dm.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTL8188E_DM_H__
-#define __RTL8188E_DM_H__
-
-enum {
- UP_LINK,
- DOWN_LINK,
-};
-
-struct dm_priv {
- u32 InitODMFlag;
-
- /* Lower Signal threshold for Rate Adaptive */
- int EntryMinUndecoratedSmoothedPWDB;
- int MinUndecoratedPWDBForDM;
-};
-
-void rtl8188e_init_dm_priv(struct adapter *adapt);
-void rtl8188e_InitHalDm(struct adapter *adapt);
-void rtl8188e_HalDmWatchDog(struct adapter *adapt);
-
-void AntDivCompare8188E(struct adapter *adapt, struct wlan_bssid_ex *dst,
- struct wlan_bssid_ex *src);
-u8 AntDivBeforeLink8188E(struct adapter *adapt);
-
-#endif
diff --git a/drivers/staging/r8188eu/include/rtl8188e_hal.h b/drivers/staging/r8188eu/include/rtl8188e_hal.h
deleted file mode 100644
index feeb37c22897..000000000000
--- a/drivers/staging/r8188eu/include/rtl8188e_hal.h
+++ /dev/null
@@ -1,181 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTL8188E_HAL_H__
-#define __RTL8188E_HAL_H__
-
-/* include HAL Related header after HAL Related compiling flags */
-#include "rtl8188e_spec.h"
-#include "Hal8188EPhyReg.h"
-#include "Hal8188EPhyCfg.h"
-#include "rtl8188e_rf.h"
-#include "rtl8188e_dm.h"
-#include "rtl8188e_recv.h"
-#include "rtl8188e_xmit.h"
-#include "rtl8188e_cmd.h"
-#include "rtw_efuse.h"
-#include "odm.h"
-#include "odm_HWConfig.h"
-#include "odm_RegDefine11N.h"
-#include "HalPhyRf_8188e.h"
-#include "Hal8188ERateAdaptive.h"
-#include "HalHWImg8188E_MAC.h"
-#include "HalHWImg8188E_RF.h"
-#include "HalHWImg8188E_BB.h"
-#include "odm_RTL8188E.h"
-
-#define DRVINFO_SZ 4 /* unit is 8bytes */
-#define PageNum_128(_Len) (u32)(((_Len)>>7) + ((_Len) & 0x7F ? 1 : 0))
-
-#define DRIVER_EARLY_INT_TIME 0x05
-#define BCN_DMA_ATIME_INT_TIME 0x02
-
-#define MAX_RX_DMA_BUFFER_SIZE_88E \
-	0x2400 /* 9k for 88E normal chip, MaxRxBuff=10k-max(TxReportSize(64*8),
-			 * WOLPattern(16*24)) */
-
-#define TX_SELE_LQ BIT(1) /* Low Queue */
-#define TX_SELE_NQ BIT(2) /* Normal Queue */
-
-/* Note: We divide the number of pages equally among the queues other
- *	than the public queue! */
-/* 22k = 22528 bytes = 176 pages (@page =  128 bytes) */
-/* must reserve about 7 pages for LPS =>  176-7 = 169 (0xA9) */
-/* 2*BCN / 1*ps-poll / 1*null-data /1*prob_rsp /1*QOS null-data /1*BT QOS
- * null-data */
-
-#define TX_TOTAL_PAGE_NUMBER_88E 0xA9/* 169 (21632=> 21k) */
-
-#define TX_PAGE_BOUNDARY_88E (TX_TOTAL_PAGE_NUMBER_88E + 1)
-
-#include "HalVerDef.h"
-#include "hal_com.h"
-
-/* Channel Plan */
-enum ChannelPlan {
- CHPL_FCC = 0,
- CHPL_IC = 1,
- CHPL_ETSI = 2,
- CHPL_SPA = 3,
- CHPL_FRANCE = 4,
- CHPL_MKK = 5,
- CHPL_MKK1 = 6,
- CHPL_ISRAEL = 7,
- CHPL_TELEC = 8,
- CHPL_GLOBAL = 9,
- CHPL_WORLD = 10,
-};
-
-struct txpowerinfo24g {
- u8 IndexCCK_Base[RF_PATH_MAX][MAX_CHNL_GROUP_24G];
- u8 IndexBW40_Base[RF_PATH_MAX][MAX_CHNL_GROUP_24G];
- /* If only one tx, only BW20 and OFDM are used. */
- s8 CCK_Diff[RF_PATH_MAX][MAX_TX_COUNT];
- s8 OFDM_Diff[RF_PATH_MAX][MAX_TX_COUNT];
- s8 BW20_Diff[RF_PATH_MAX][MAX_TX_COUNT];
- s8 BW40_Diff[RF_PATH_MAX][MAX_TX_COUNT];
-};
-
-#define EFUSE_REAL_CONTENT_LEN 512
-#define AVAILABLE_EFUSE_ADDR(addr) ((addr) < EFUSE_REAL_CONTENT_LEN)
-
-#define EFUSE_REAL_CONTENT_LEN_88E 256
-#define EFUSE_MAP_LEN_88E 512
-#define EFUSE_MAX_SECTION_88E 64
-/* To prevent an out-of-boundary programming case, leave 1 byte and program
- *	the full section */
-/* 9 bytes + 1 byte + 5 bytes and pre 1 byte. */
-/* For the worst case: */
-/* |	2byte|----8bytes----|1byte|--7bytes--| 92D */
-/* PG data excludes the header, 7 dummy bytes from CP test and 1 reserved byte. */
-#define EFUSE_OOB_PROTECT_BYTES_88E 18
-
-#define EFUSE_PROTECT_BYTES_BANK 16
-
-#define USB_RXAGG_PAGE_COUNT 48
-#define USB_RXAGG_PAGE_TIMEOUT 0x4
-
-struct hal_data_8188e {
- struct HAL_VERSION VersionID;
- /* current WIFI_PHY values */
- enum ht_channel_width CurrentChannelBW;
- u8 CurrentChannel;
- u8 nCur40MhzPrimeSC;/* Control channel sub-carrier */
-
- u8 EEPROMRegulatory;
- u8 EEPROMThermalMeter;
-
- u8 Index24G_CCK_Base[CHANNEL_MAX_NUMBER];
- u8 Index24G_BW40_Base[CHANNEL_MAX_NUMBER];
- /* If only one tx, only BW20 and OFDM are used. */
- s8 OFDM_24G_Diff[MAX_TX_COUNT];
- s8 BW20_24G_Diff[MAX_TX_COUNT];
-
- /* HT 20<->40 Pwr diff */
- u8 TxPwrHt20Diff[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
- /* For HT<->legacy pwr diff */
- u8 TxPwrLegacyHtDiff[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
- /* For power group */
- u8 PwrGroupHT20[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
- u8 PwrGroupHT40[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
-
- /* Read/write are allow for following hardware information variables */
- u8 pwrGroupCnt;
- u32 MCSTxPowerLevelOriginalOffset[MAX_PG_GROUP][16];
-
- u8 CrystalCap;
-
- u32 AcParam_BE; /* Original parameter for BE, use for EDCA turbo. */
-
- struct bb_reg_def PHYRegDef;
-
- u32 RfRegChnlVal;
-
- /* for host message to fw */
- u8 LastHMEBoxNum;
-
- u8 fw_ractrl;
- u8 RegFwHwTxQCtrl;
- u8 RegReg542;
- u8 RegCR_1;
-
- struct dm_priv dmpriv;
- struct odm_dm_struct odmpriv;
-
- u8 CurAntenna;
- u8 AntDivCfg;
- u8 TRxAntDivType;
-
- u8 out_ep_extra_queues;
-
- struct P2P_PS_Offload_t p2p_ps_offload;
-
- /* Auto FSM to Turn On, include clock, isolation, power control
- * for MAC only */
- u8 bMacPwrCtrlOn;
-};
-
-s32 InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy);
-
-/* EFuse */
-void Hal_EfuseParseIDCode88E(struct adapter *padapter, u8 *hwinfo);
-void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *hwinfo,
- bool AutoLoadFail);
-
-void rtl8188e_EfuseParseChnlPlan(struct adapter *padapter, u8 *hwinfo,
- bool AutoLoadFail);
-void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent,
- bool AutoLoadFail);
-void Hal_ReadThermalMeter_88E(struct adapter *padapter, u8 *PROMContent,
- bool AutoloadFail);
-void Hal_EfuseParseXtal_8188E(struct adapter *pAdapter, u8 *hwinfo,
- bool AutoLoadFail);
-void Hal_ReadPowerSavingMode88E(struct adapter *pAdapter, u8 *hwinfo,
- bool AutoLoadFail);
-
-void rtl8188e_read_chip_version(struct adapter *padapter);
-
-s32 rtl8188e_iol_efuse_patch(struct adapter *padapter);
-void rtw_cancel_all_timer(struct adapter *padapter);
-
-#endif /* __RTL8188E_HAL_H__ */
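
PageNum_128() above rounds a byte count up to whole 128-byte TX buffer pages, and
TX_TOTAL_PAGE_NUMBER_88E / TX_PAGE_BOUNDARY_88E budget those pages. A worked example
for a 1700-byte frame (values computed from the macros above):

	u32 pages = PageNum_128(1700);	/* 1700 >> 7 = 13, remainder 36 -> 14 pages */
	u8 bndy = TX_PAGE_BOUNDARY_88E;	/* TX_TOTAL_PAGE_NUMBER_88E + 1 = 0xAA */
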
diff --git a/drivers/staging/r8188eu/include/rtl8188e_recv.h b/drivers/staging/r8188eu/include/rtl8188e_recv.h
deleted file mode 100644
index dc4f358f646d..000000000000
--- a/drivers/staging/r8188eu/include/rtl8188e_recv.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTL8188E_RECV_H__
-#define __RTL8188E_RECV_H__
-
-#define TX_RPT1_PKT_LEN 8
-
-#define NR_PREALLOC_RECV_SKB (8)
-
-#define NR_RECVBUFF (4)
-
-#define MAX_RECVBUF_SZ (15360) /* 15k < 16k */
-
-struct phy_stat {
- unsigned int phydw0;
- unsigned int phydw1;
- unsigned int phydw2;
- unsigned int phydw3;
- unsigned int phydw4;
- unsigned int phydw5;
- unsigned int phydw6;
- unsigned int phydw7;
-};
-
-/* Rx smooth factor */
-#define Rx_Smooth_Factor (20)
-
-enum rx_packet_type {
- NORMAL_RX,/* Normal rx packet */
- TX_REPORT1,/* CCX */
- TX_REPORT2,/* TX RPT */
- HIS_REPORT,/* USB HISR RPT */
-};
-
-void rtl8188eu_recv_tasklet(unsigned long priv);
-void update_recvframe_phyinfo_88e(struct recv_frame *fra, struct phy_stat *phy);
-void update_recvframe_attrib_88e(struct recv_frame *fra, struct recv_stat *stat);
-
-#endif
diff --git a/drivers/staging/r8188eu/include/rtl8188e_rf.h b/drivers/staging/r8188eu/include/rtl8188e_rf.h
deleted file mode 100644
index 63ac0acc68fd..000000000000
--- a/drivers/staging/r8188eu/include/rtl8188e_rf.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTL8188E_RF_H__
-#define __RTL8188E_RF_H__
-
-#define RF6052_MAX_TX_PWR 0x3F
-#define RF6052_MAX_REG 0x3F
-#define RF6052_MAX_PATH 2
-
-int phy_RF6052_Config_ParaFile(struct adapter *Adapter);
-void rtl8188e_PHY_RF6052SetBandwidth(struct adapter *Adapter,
- enum ht_channel_width Bandwidth);
-void rtl8188e_PHY_RF6052SetCckTxPower(struct adapter *Adapter, u8 *level);
-void rtl8188e_PHY_RF6052SetOFDMTxPower(struct adapter *Adapter, u8 *ofdm,
- u8 *pwrbw20, u8 *pwrbw40, u8 channel);
-
-#endif/* __RTL8188E_RF_H__ */
diff --git a/drivers/staging/r8188eu/include/rtl8188e_spec.h b/drivers/staging/r8188eu/include/rtl8188e_spec.h
deleted file mode 100644
index 25b31417cd58..000000000000
--- a/drivers/staging/r8188eu/include/rtl8188e_spec.h
+++ /dev/null
@@ -1,1142 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTL8188E_SPEC_H__
-#define __RTL8188E_SPEC_H__
-
-/* 8192C Register offset definition */
-
-#define HAL_PS_TIMER_INT_DELAY 50 /* 50 microseconds */
-#define HAL_92C_NAV_UPPER_UNIT 128 /* micro-second */
-
-/* 8188E PKT_BUFF_ACCESS_CTRL value */
-#define TXPKT_BUF_SELECT 0x69
-#define RXPKT_BUF_SELECT 0xA5
-#define DISABLE_TRXPKT_BUF_ACCESS 0x0
-
-/* 0x0000h ~ 0x00FFh System Configuration */
-#define REG_SYS_ISO_CTRL 0x0000
-#define REG_SYS_FUNC_EN 0x0002
-#define REG_APS_FSMCO 0x0004
-#define REG_SYS_CLKR 0x0008
-#define REG_9346CR 0x000A
-#define REG_EE_VPD 0x000C
-#define REG_AFE_MISC 0x0010
-#define REG_SPS0_CTRL 0x0011
-#define REG_SPS_OCP_CFG 0x0018
-#define REG_RSV_CTRL 0x001C
-#define REG_RF_CTRL 0x001F
-#define REG_LDOA15_CTRL 0x0020
-#define REG_LDOV12D_CTRL 0x0021
-#define REG_LDOHCI12_CTRL 0x0022
-#define REG_LPLDO_CTRL 0x0023
-#define REG_AFE_XTAL_CTRL 0x0024
-#define REG_AFE_PLL_CTRL 0x0028
-#define REG_APE_PLL_CTRL_EXT 0x002c
-#define REG_EFUSE_CTRL 0x0030
-#define REG_EFUSE_TEST 0x0034
-#define REG_GPIO_MUXCFG 0x0040
-#define REG_GPIO_IO_SEL 0x0042
-#define REG_MAC_PINMUX_CFG 0x0043
-#define REG_GPIO_PIN_CTRL 0x0044
-#define REG_GPIO_INTM 0x0048
-#define REG_LEDCFG0 0x004C
-#define REG_LEDCFG1 0x004D
-#define REG_LEDCFG2 0x004E
-#define REG_LEDCFG3 0x004F
-#define REG_FSIMR 0x0050
-#define REG_FSISR 0x0054
-#define REG_HSIMR 0x0058
-#define REG_HSISR 0x005c
-#define REG_GPIO_PIN_CTRL_2 0x0060 /* RTL8723 WIFI/BT/GPS
- * Multi-Function GPIO Pin Control. */
-#define REG_GPIO_IO_SEL_2 0x0062 /* RTL8723 WIFI/BT/GPS
- * Multi-Function GPIO Select. */
-#define REG_BB_PAD_CTRL 0x0064
-#define REG_MULTI_FUNC_CTRL 0x0068 /* RTL8723 WIFI/BT/GPS
- * Multi-Function control source. */
-#define REG_GPIO_OUTPUT 0x006c
-#define REG_AFE_XTAL_CTRL_EXT 0x0078 /* RTL8188E */
-#define REG_XCK_OUT_CTRL 0x007c /* RTL8188E */
-#define REG_MCUFWDL 0x0080
-#define REG_WOL_EVENT 0x0081 /* RTL8188E */
-#define REG_MCUTSTCFG 0x0084
-#define REG_HMEBOX_E0 0x0088
-#define REG_HMEBOX_E1 0x008A
-#define REG_HMEBOX_E2 0x008C
-#define REG_HMEBOX_E3 0x008E
-#define REG_HMEBOX_EXT_0 0x01F0
-#define REG_HMEBOX_EXT_1 0x01F4
-#define REG_HMEBOX_EXT_2 0x01F8
-#define REG_HMEBOX_EXT_3 0x01FC
-#define REG_HIMR_88E 0x00B0
-#define REG_HISR_88E 0x00B4
-#define REG_HIMRE_88E 0x00B8
-#define REG_HISRE_88E 0x00BC
-#define REG_EFUSE_ACCESS 0x00CF /* Efuse access protection
- * for RTL8723 */
-#define REG_BIST_SCAN 0x00D0
-#define REG_BIST_RPT 0x00D4
-#define REG_BIST_ROM_RPT 0x00D8
-#define REG_USB_SIE_INTF 0x00E0
-#define REG_PCIE_MIO_INTF 0x00E4
-#define REG_PCIE_MIO_INTD 0x00E8
-#define REG_HPON_FSM 0x00EC
-#define REG_SYS_CFG 0x00F0
-#define REG_GPIO_OUTSTS 0x00F4 /* For RTL8723 only. */
-#define REG_TYPE_ID 0x00FC
-
-#define REG_MAC_PHY_CTRL_NORMAL 0x00f8
-
-/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
-#define REG_CR 0x0100
-#define REG_PBP 0x0104
-#define REG_PKT_BUFF_ACCESS_CTRL 0x0106
-#define REG_TRXDMA_CTRL 0x010C
-#define REG_TRXFF_BNDY 0x0114
-#define REG_TRXFF_STATUS 0x0118
-#define REG_RXFF_PTR 0x011C
-/* define REG_HIMR 0x0120 */
-/* define REG_HISR 0x0124 */
-#define REG_HIMRE 0x0128
-#define REG_HISRE 0x012C
-#define REG_CPWM 0x012F
-#define REG_FWIMR 0x0130
-#define REG_FTIMR 0x0138
-#define REG_FWISR 0x0134
-#define REG_PKTBUF_DBG_CTRL 0x0140
-#define REG_PKTBUF_DBG_ADDR (REG_PKTBUF_DBG_CTRL)
-#define REG_RXPKTBUF_DBG (REG_PKTBUF_DBG_CTRL+2)
-#define REG_TXPKTBUF_DBG (REG_PKTBUF_DBG_CTRL+3)
-#define REG_RXPKTBUF_CTRL (REG_PKTBUF_DBG_CTRL+2)
-#define REG_PKTBUF_DBG_DATA_L 0x0144
-#define REG_PKTBUF_DBG_DATA_H 0x0148
-
-#define REG_TC0_CTRL 0x0150
-#define REG_TC1_CTRL 0x0154
-#define REG_TC2_CTRL 0x0158
-#define REG_TC3_CTRL 0x015C
-#define REG_TC4_CTRL 0x0160
-#define REG_TCUNIT_BASE 0x0164
-#define REG_MBIST_START 0x0174
-#define REG_MBIST_DONE 0x0178
-#define REG_MBIST_FAIL 0x017C
-#define REG_32K_CTRL 0x0194 /* RTL8188E */
-#define REG_C2HEVT_MSG_NORMAL 0x01A0
-#define REG_C2HEVT_CLEAR 0x01AF
-#define REG_MCUTST_1 0x01c0
-#define REG_FMETHR 0x01C8
-#define REG_HMETFR 0x01CC
-#define REG_HMEBOX_0 0x01D0
-#define REG_HMEBOX_1 0x01D4
-#define REG_HMEBOX_2 0x01D8
-#define REG_HMEBOX_3 0x01DC
-
-#define REG_LLT_INIT 0x01E0
-
-/* 0x0200h ~ 0x027Fh TXDMA Configuration */
-#define REG_RQPN 0x0200
-#define REG_FIFOPAGE 0x0204
-#define REG_TDECTRL 0x0208
-#define REG_TXDMA_OFFSET_CHK 0x020C
-#define REG_TXDMA_STATUS 0x0210
-#define REG_RQPN_NPQ 0x0214
-
-/* 0x0280h ~ 0x02FFh RXDMA Configuration */
-#define REG_RXDMA_AGG_PG_TH 0x0280
-#define REG_RXPKT_NUM 0x0284
-#define REG_RXDMA_STATUS 0x0288
-
-/* 0x0300h ~ 0x03FFh PCIe */
-#define REG_PCIE_CTRL_REG 0x0300
-#define REG_INT_MIG 0x0304 /* Interrupt Migration */
-#define REG_BCNQ_DESA 0x0308 /* TX Beacon Descr Address */
-#define REG_HQ_DESA 0x0310 /* TX High Queue Descr Addr */
-#define REG_MGQ_DESA 0x0318 /* TX Manage Queue Descr Addr*/
-#define REG_VOQ_DESA 0x0320 /* TX VO Queue Descr Addr */
-#define REG_VIQ_DESA 0x0328 /* TX VI Queue Descr Addr */
-#define REG_BEQ_DESA 0x0330 /* TX BE Queue Descr Addr */
-#define REG_BKQ_DESA 0x0338 /* TX BK Queue Descr Addr */
-#define REG_RX_DESA 0x0340 /* RX Queue Descr Addr */
-#define REG_MDIO 0x0354 /* MDIO for Access PCIE PHY */
-#define REG_DBG_SEL 0x0360 /* Debug Selection Register */
-#define REG_PCIE_HRPWM 0x0361 /* PCIe RPWM */
-#define REG_PCIE_HCPWM 0x0363 /* PCIe CPWM */
-#define REG_WATCH_DOG 0x0368
-
-/* RTL8723 series ------------------------------ */
-#define REG_PCIE_HISR 0x03A0
-
-/* spec version 11 */
-/* 0x0400h ~ 0x047Fh Protocol Configuration */
-#define REG_VOQ_INFORMATION 0x0400
-#define REG_VIQ_INFORMATION 0x0404
-#define REG_BEQ_INFORMATION 0x0408
-#define REG_BKQ_INFORMATION 0x040C
-#define REG_MGQ_INFORMATION 0x0410
-#define REG_HGQ_INFORMATION 0x0414
-#define REG_BCNQ_INFORMATION 0x0418
-#define REG_TXPKT_EMPTY 0x041A
-
-#define REG_CPU_MGQ_INFORMATION 0x041C
-#define REG_FWHW_TXQ_CTRL 0x0420
-#define REG_HWSEQ_CTRL 0x0423
-#define REG_TXPKTBUF_BCNQ_BDNY 0x0424
-#define REG_TXPKTBUF_MGQ_BDNY 0x0425
-#define REG_LIFETIME_EN 0x0426
-#define REG_MULTI_BCNQ_OFFSET 0x0427
-#define REG_SPEC_SIFS 0x0428
-#define REG_RL 0x042A
-#define REG_DARFRC 0x0430
-#define REG_RARFRC 0x0438
-#define REG_RRSR 0x0440
-#define REG_ARFR0 0x0444
-#define REG_ARFR1 0x0448
-#define REG_ARFR2 0x044C
-#define REG_ARFR3 0x0450
-#define REG_AGGLEN_LMT 0x0458
-#define REG_AMPDU_MIN_SPACE 0x045C
-#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D
-#define REG_FAST_EDCA_CTRL 0x0460
-#define REG_RD_RESP_PKT_TH 0x0463
-#define REG_INIRTS_RATE_SEL 0x0480
-/* define REG_INIDATA_RATE_SEL 0x0484 */
-#define REG_POWER_STATUS 0x04A4
-#define REG_POWER_STAGE1 0x04B4
-#define REG_POWER_STAGE2 0x04B8
-#define REG_PKT_VO_VI_LIFE_TIME 0x04C0
-#define REG_PKT_BE_BK_LIFE_TIME 0x04C2
-#define REG_STBC_SETTING 0x04C4
-#define REG_PROT_MODE_CTRL 0x04C8
-#define REG_MAX_AGGR_NUM 0x04CA
-#define REG_RTS_MAX_AGGR_NUM 0x04CB
-#define REG_BAR_MODE_CTRL 0x04CC
-#define REG_RA_TRY_RATE_AGG_LMT 0x04CF
-#define REG_EARLY_MODE_CONTROL 0x4D0
-#define REG_NQOS_SEQ 0x04DC
-#define REG_QOS_SEQ 0x04DE
-#define REG_NEED_CPU_HANDLE 0x04E0
-#define REG_PKT_LOSE_RPT 0x04E1
-#define REG_PTCL_ERR_STATUS 0x04E2
-#define REG_TX_RPT_CTRL 0x04EC
-#define REG_TX_RPT_TIME 0x04F0 /* 2 byte */
-#define REG_DUMMY 0x04FC
-
-/* 0x0500h ~ 0x05FFh EDCA Configuration */
-#define REG_EDCA_VO_PARAM 0x0500
-#define REG_EDCA_VI_PARAM 0x0504
-#define REG_EDCA_BE_PARAM 0x0508
-#define REG_EDCA_BK_PARAM 0x050C
-#define REG_BCNTCFG 0x0510
-#define REG_PIFS 0x0512
-#define REG_RDG_PIFS 0x0513
-#define REG_SIFS_CTX 0x0514
-#define REG_SIFS_TRX 0x0516
-#define REG_TSFTR_SYN_OFFSET 0x0518
-#define REG_AGGR_BREAK_TIME 0x051A
-#define REG_SLOT 0x051B
-#define REG_TX_PTCL_CTRL 0x0520
-#define REG_TXPAUSE 0x0522
-#define REG_DIS_TXREQ_CLR 0x0523
-#define REG_RD_CTRL 0x0524
-/* Format for offset 540h-542h: */
-/* [3:0]: TBTT prohibit setup in units of 32us. The time the HW needs to
- * fetch the beacon content before TBTT. */
-/* [7:4]: Reserved. */
-/* [19:8]: TBTT prohibit hold in units of 32us. The time the HW holds
- * before sending the beacon packet. */
-/* [23:20]: Reserved */
-/* Description: */
-/* | */
-/* |<--Setup--|--Hold------------>| */
-/* --------------|---------------------- */
-/* | */
-/* TBTT */
-/* Note: We cannot update the beacon content in HW or send any AC packets
- * during the time between Setup and Hold. */
-#define REG_TBTT_PROHIBIT 0x0540
-#define REG_RD_NAV_NXT 0x0544
-#define REG_NAV_PROT_LEN 0x0546
-#define REG_BCN_CTRL 0x0550
-#define REG_BCN_CTRL_1 0x0551
-#define REG_MBID_NUM 0x0552
-#define REG_DUAL_TSF_RST 0x0553
-#define REG_BCN_INTERVAL 0x0554
-#define REG_DRVERLYINT 0x0558
-#define REG_BCNDMATIM 0x0559
-#define REG_ATIMWND 0x055A
-#define REG_BCN_MAX_ERR 0x055D
-#define REG_RXTSF_OFFSET_CCK 0x055E
-#define REG_RXTSF_OFFSET_OFDM 0x055F
-#define REG_TSFTR 0x0560
-#define REG_TSFTR1 0x0568
-#define REG_ATIMWND_1 0x0570
-#define REG_PSTIMER 0x0580
-#define REG_TIMER0 0x0584
-#define REG_TIMER1 0x0588
-#define REG_ACMHWCTRL 0x05C0
-
-/* define REG_FW_TSF_SYNC_CNT 0x04A0 */
-#define REG_FW_RESET_TSF_CNT_1 0x05FC
-#define REG_FW_RESET_TSF_CNT_0 0x05FD
-#define REG_FW_BCN_DIS_CNT 0x05FE
-
-/* 0x0600h ~ 0x07FFh WMAC Configuration */
-#define REG_APSD_CTRL 0x0600
-#define REG_BWOPMODE 0x0603
-#define REG_TCR 0x0604
-#define REG_RCR 0x0608
-#define REG_RX_PKT_LIMIT 0x060C
-#define REG_RX_DLK_TIME 0x060D
-#define REG_RX_DRVINFO_SZ 0x060F
-
-#define REG_MACID 0x0610
-#define REG_BSSID 0x0618
-#define REG_MAR 0x0620
-#define REG_MBIDCAMCFG 0x0628
-
-#define REG_USTIME_EDCA 0x0638
-#define REG_MAC_SPEC_SIFS 0x063A
-
-/* 20100719 Joseph: Hardware register definition change. (HW datasheet v54) */
-/* [15:8]SIFS_R2T_OFDM, [7:0]SIFS_R2T_CCK */
-#define REG_R2T_SIFS 0x063C
-/* [15:8]SIFS_T2T_OFDM, [7:0]SIFS_T2T_CCK */
-#define REG_T2T_SIFS 0x063E
-#define REG_ACKTO 0x0640
-#define REG_CTS2TO 0x0641
-#define REG_EIFS 0x0642
-
-/* RXERR_RPT */
-#define RXERR_TYPE_OFDM_PPDU 0
-#define RXERR_TYPE_OFDM_false_ALARM 1
-#define RXERR_TYPE_OFDM_MPDU_OK 2
-#define RXERR_TYPE_OFDM_MPDU_FAIL 3
-#define RXERR_TYPE_CCK_PPDU 4
-#define RXERR_TYPE_CCK_false_ALARM 5
-#define RXERR_TYPE_CCK_MPDU_OK 6
-#define RXERR_TYPE_CCK_MPDU_FAIL 7
-#define RXERR_TYPE_HT_PPDU 8
-#define RXERR_TYPE_HT_false_ALARM 9
-#define RXERR_TYPE_HT_MPDU_TOTAL 10
-#define RXERR_TYPE_HT_MPDU_OK 11
-#define RXERR_TYPE_HT_MPDU_FAIL 12
-#define RXERR_TYPE_RX_FULL_DROP 15
-
-#define RXERR_COUNTER_MASK 0xFFFFF
-#define RXERR_RPT_RST BIT(27)
-#define _RXERR_RPT_SEL(type) ((type) << 28)
-
-/* Note: */
-/* The NAV upper value is very important to the WiFi 11n 5.2.3 NAV test.
- *	The default value is always too small, but the WiFi TestPlan tests
- *	with 25,000 microseconds of NAV set by sending CTS in the air.
- *	We must update this value to more than 25,000 microseconds to pass
- *	the item. The offset of NAV_UPPER in the 8192C spec is incorrect;
- *	the offset should be 0x0652. */
-#define REG_NAV_UPPER 0x0652 /* unit of 128 */
-
-/* WMA, BA, CCX */
-/* define REG_NAV_CTRL 0x0650 */
-#define REG_BACAMCMD 0x0654
-#define REG_BACAMCONTENT 0x0658
-#define REG_LBDLY 0x0660
-#define REG_FWDLY 0x0661
-#define REG_RXERR_RPT 0x0664
-#define REG_WMAC_TRXPTCL_CTL 0x0668
-
-/* Security */
-#define REG_CAMCMD 0x0670
-#define REG_CAMWRITE 0x0674
-#define REG_CAMREAD 0x0678
-#define REG_CAMDBG 0x067C
-#define REG_SECCFG 0x0680
-
-/* Power */
-#define REG_WOW_CTRL 0x0690
-#define REG_PS_RX_INFO 0x0692
-#define REG_UAPSD_TID 0x0693
-#define REG_WKFMCAM_CMD 0x0698
-#define REG_WKFMCAM_NUM_88E 0x698
-#define REG_RXFLTMAP0 0x06A0
-#define REG_RXFLTMAP1 0x06A2
-#define REG_RXFLTMAP2 0x06A4
-#define REG_BCN_PSR_RPT 0x06A8
-#define REG_BT_COEX_TABLE 0x06C0
-
-/* Hardware Port 2 */
-#define REG_MACID1 0x0700
-#define REG_BSSID1 0x0708
-
-/* 0xFE00h ~ 0xFE55h USB Configuration */
-#define REG_USB_INFO 0xFE17
-#define REG_USB_SPECIAL_OPTION 0xFE55
-#define REG_USB_DMA_AGG_TO 0xFE5B
-#define REG_USB_AGG_TO 0xFE5C
-#define REG_USB_AGG_TH 0xFE5D
-
-/* For normal chip */
-#define REG_NORMAL_SIE_VID 0xFE60 /* 0xFE60~0xFE61 */
-#define REG_NORMAL_SIE_PID 0xFE62 /* 0xFE62~0xFE63 */
-#define REG_NORMAL_SIE_OPTIONAL 0xFE64
-#define REG_NORMAL_SIE_EP 0xFE65 /* 0xFE65~0xFE67 */
-#define REG_NORMAL_SIE_PHY 0xFE68 /* 0xFE68~0xFE6B */
-#define REG_NORMAL_SIE_OPTIONAL2 0xFE6C
-#define REG_NORMAL_SIE_GPS_EP 0xFE6D /* 0xFE6D, for RTL8723 only. */
-#define REG_NORMAL_SIE_MAC_ADDR 0xFE70 /* 0xFE70~0xFE75 */
-#define REG_NORMAL_SIE_STRING 0xFE80 /* 0xFE80~0xFEDF */
-
-/* TODO: use these definitions when using the REG_xxx naming rule. */
-/* NOTE: DO NOT remove these definitions. They will be used later. */
-
-#define EFUSE_CTRL REG_EFUSE_CTRL /* E-Fuse Control. */
-#define EFUSE_TEST REG_EFUSE_TEST /* E-Fuse Test. */
-#define MSR (REG_CR + 2) /* Media Status reg */
-#define ISR REG_HISR_88E
-/* Timing Sync Function Timer Register. */
-#define TSFR REG_TSFTR
-
-#define PBP REG_PBP
-
-/* Redefine the MACID register, for compatibility with prior ICs. */
-/* MAC ID Register, Offset 0x0050-0x0053 */
-#define IDR0 REG_MACID
-/* MAC ID Register, Offset 0x0054-0x0055 */
-#define IDR4 (REG_MACID + 4)
-
-/* 9. Security Control Registers (Offset: ) */
-/* IN 8190 Data Sheet is called CAMcmd */
-#define RWCAM REG_CAMCMD
-/* Software write CAM input content */
-#define WCAMI REG_CAMWRITE
-/* Software read/write CAM config */
-#define RCAMO REG_CAMREAD
-#define CAMDBG REG_CAMDBG
-/* Security Configuration Register */
-#define SECR REG_SECCFG
-
-/* Unused register */
-#define UnusedRegister 0x1BF
-#define DCAM UnusedRegister
-#define PSR UnusedRegister
-#define BBAddr UnusedRegister
-#define PhyDataR UnusedRegister
-
-/* Min Spacing related settings. */
-#define MAX_MSS_DENSITY_2T 0x13
-#define MAX_MSS_DENSITY_1T 0x0A
-
-/* 8192C GPIO MUX Configuration Register (offset 0x40, 4 byte) */
-#define GPIOSEL_GPIO 0
-#define GPIOSEL_ENBT BIT(5)
-
-/* 8192C GPIO PIN Control Register (offset 0x44, 4 byte) */
-/* GPIO pins input value */
-#define GPIO_IN REG_GPIO_PIN_CTRL
-/* GPIO pins output value */
-#define GPIO_OUT (REG_GPIO_PIN_CTRL+1)
-/* A GPIO pin is configured as output when its bit is set to "1"; otherwise,
- * it is configured as input. */
-#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL+2)
-#define GPIO_MOD (REG_GPIO_PIN_CTRL+3)
-
-/* 8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 byte) */
-#define HSIMR_GPIO12_0_INT_EN BIT(0)
-#define HSIMR_SPS_OCP_INT_EN BIT(5)
-#define HSIMR_RON_INT_EN BIT(6)
-#define HSIMR_PDN_INT_EN BIT(7)
-#define HSIMR_GPIO9_INT_EN BIT(25)
-
-/* 8723/8188E Host System Interrupt Status Register (offset 0x5C, 32 byte) */
-#define HSISR_GPIO12_0_INT BIT(0)
-#define HSISR_SPS_OCP_INT BIT(5)
-#define HSISR_RON_INT_EN BIT(6)
-#define HSISR_PDNINT BIT(7)
-#define HSISR_GPIO9_INT BIT(25)
-
-/* 8192C (MSR) Media Status Register (Offset 0x4C, 8 bits) */
-/*
-Network Type
-00: No link
-01: Link in ad hoc network
-10: Link in infrastructure network
-11: AP mode
-Default: 00b.
-*/
-#define MSR_NOLINK 0x00
-#define MSR_ADHOC 0x01
-#define MSR_INFRA 0x02
-#define MSR_AP 0x03
-
-/* 88E Driver Initialization Offload REG_FDHM0(Offset 0x88, 8 bits) */
-/* IOL config for REG_FDHM0(Reg0x88) */
-#define CMD_INIT_LLT BIT(0)
-#define CMD_READ_EFUSE_MAP BIT(1)
-#define CMD_EFUSE_PATCH BIT(2)
-#define CMD_IOCONFIG BIT(3)
-#define CMD_INIT_LLT_ERR BIT(4)
-#define CMD_READ_EFUSE_MAP_ERR BIT(5)
-#define CMD_EFUSE_PATCH_ERR BIT(6)
-#define CMD_IOCONFIG_ERR BIT(7)
-
-/* 6. Adaptive Control Registers (Offset: 0x0160 - 0x01CF) */
-/* 8192C Response Rate Set Register (offset 0x181, 24bits) */
-#define RRSR_1M BIT(0)
-#define RRSR_2M BIT(1)
-#define RRSR_5_5M BIT(2)
-#define RRSR_11M BIT(3)
-#define RRSR_6M BIT(4)
-#define RRSR_9M BIT(5)
-#define RRSR_12M BIT(6)
-#define RRSR_18M BIT(7)
-#define RRSR_24M BIT(8)
-#define RRSR_36M BIT(9)
-#define RRSR_48M BIT(10)
-#define RRSR_54M BIT(11)
-#define RRSR_MCS0 BIT(12)
-#define RRSR_MCS1 BIT(13)
-#define RRSR_MCS2 BIT(14)
-#define RRSR_MCS3 BIT(15)
-#define RRSR_MCS4 BIT(16)
-#define RRSR_MCS5 BIT(17)
-#define RRSR_MCS6 BIT(18)
-#define RRSR_MCS7 BIT(19)
-
-/* 8192C Response Rate Set Register (offset 0x1BF, 8bits) */
-/* WOL bit information */
-#define HAL92C_WOL_PTK_UPDATE_EVENT BIT(0)
-#define HAL92C_WOL_GTK_UPDATE_EVENT BIT(1)
-
-/* 8192C BW_OPMODE bits (Offset 0x203, 8bit) */
-#define BW_OPMODE_20MHZ BIT(2)
-
-#define CAM_WRITE BIT(16)
-#define CAM_POLLINIG BIT(31)
-
-#define SCR_UseDK 0x01
-#define SCR_TxSecEnable 0x02
-#define SCR_RxSecEnable 0x04
-
-/* 10. Power Save Control Registers (Offset: 0x0260 - 0x02DF) */
-#define WOW_PMEN BIT(0) /* Power management Enable. */
-#define WOW_WOMEN BIT(1) /* WoW function on or off. */
-#define WOW_MAGIC BIT(2) /* Magic packet */
-#define WOW_UWF BIT(3) /* Unicast Wakeup frame. */
-
-/* 12. Host Interrupt Status Registers (Offset: 0x0300 - 0x030F) */
-/* 8188 IMR/ISR bits */
-#define IMR_DISABLED_88E 0x0
-/* IMR DW0(0x0060-0063) Bit 0-31 */
-#define IMR_TXCCK_88E BIT(30) /* TXRPT interrupt when CCX bit of the packet is set */
-#define IMR_PSTIMEOUT_88E BIT(29) /* Power Save Time Out Interrupt */
-#define IMR_GTINT4_88E BIT(28) /* When GTIMER4 expires, this bit is set to 1 */
-#define IMR_GTINT3_88E BIT(27) /* When GTIMER3 expires, this bit is set to 1 */
-#define IMR_TBDER_88E BIT(26) /* Transmit Beacon0 Error */
-#define IMR_TBDOK_88E BIT(25) /* Transmit Beacon0 OK */
-#define IMR_TSF_BIT32_TOGGLE_88E BIT(24) /* TSF Timer BIT32 toggle indication interrupt */
-#define IMR_BCNDMAINT0_88E BIT(20) /* Beacon DMA Interrupt 0 */
-#define IMR_BCNDERR0_88E BIT(16) /* Beacon Queue DMA Error 0 */
-#define IMR_HSISR_IND_ON_INT_88E BIT(15) /* HSISR Indicator (HSIMR & HSISR is true, this bit is set to 1) */
-#define IMR_BCNDMAINT_E_88E BIT(14) /* Beacon DMA Interrupt Extension for Win7 */
-#define IMR_ATIMEND_88E			BIT(12)		/* CTWindow End or ATIM Window End */
-#define IMR_HISR1_IND_INT_88E BIT(11) /* HISR1 Indicator (HISR1 & HIMR1 is true, this bit is set to 1) */
-#define IMR_C2HCMD_88E BIT(10) /* CPU to Host Command INT Status, Write 1 clear */
-#define IMR_CPWM2_88E BIT(9) /* CPU power Mode exchange INT Status, Write 1 clear */
-#define IMR_CPWM_88E BIT(8) /* CPU power Mode exchange INT Status, Write 1 clear */
-#define IMR_HIGHDOK_88E BIT(7) /* High Queue DMA OK */
-#define IMR_MGNTDOK_88E BIT(6) /* Management Queue DMA OK */
-#define IMR_BKDOK_88E BIT(5) /* AC_BK DMA OK */
-#define IMR_BEDOK_88E BIT(4) /* AC_BE DMA OK */
-#define IMR_VIDOK_88E BIT(3) /* AC_VI DMA OK */
-#define IMR_VODOK_88E BIT(2) /* AC_VO DMA OK */
-#define IMR_RDU_88E BIT(1) /* Rx Descriptor Unavailable */
-#define IMR_ROK_88E BIT(0) /* Receive DMA OK */
-
-/* IMR DW1(0x00B4-00B7) Bit 0-31 */
-#define IMR_BCNDMAINT7_88E BIT(27) /* Beacon DMA Interrupt 7 */
-#define IMR_BCNDMAINT6_88E BIT(26) /* Beacon DMA Interrupt 6 */
-#define IMR_BCNDMAINT5_88E BIT(25) /* Beacon DMA Interrupt 5 */
-#define IMR_BCNDMAINT4_88E BIT(24) /* Beacon DMA Interrupt 4 */
-#define IMR_BCNDMAINT3_88E BIT(23) /* Beacon DMA Interrupt 3 */
-#define IMR_BCNDMAINT2_88E BIT(22) /* Beacon DMA Interrupt 2 */
-#define IMR_BCNDMAINT1_88E BIT(21) /* Beacon DMA Interrupt 1 */
-#define IMR_BCNDERR7_88E BIT(20) /* Beacon DMA Error Int 7 */
-#define IMR_BCNDERR6_88E BIT(19) /* Beacon DMA Error Int 6 */
-#define IMR_BCNDERR5_88E BIT(18) /* Beacon DMA Error Int 5 */
-#define IMR_BCNDERR4_88E BIT(17) /* Beacon DMA Error Int 4 */
-#define IMR_BCNDERR3_88E BIT(16) /* Beacon DMA Error Int 3 */
-#define IMR_BCNDERR2_88E BIT(15) /* Beacon DMA Error Int 2 */
-#define IMR_BCNDERR1_88E BIT(14) /* Beacon DMA Error Int 1 */
-#define IMR_ATIMEND_E_88E BIT(13) /* ATIM Window End Ext for Win7 */
-#define IMR_TXERR_88E BIT(11) /* Tx Err Flag Int Status, write 1 clear. */
-#define IMR_RXERR_88E BIT(10) /* Rx Err Flag INT Status, Write 1 clear */
-#define IMR_TXFOVW_88E BIT(9) /* Transmit FIFO Overflow */
-#define IMR_RXFOVW_88E BIT(8) /* Receive FIFO Overflow */
-
-#define HAL_NIC_UNPLUG_ISR 0xFFFFFFFF /* The value when the NIC is unplugged for PCI. */
-
-/* 8192C EFUSE */
-#define HWSET_MAX_SIZE 256
-#define HWSET_MAX_SIZE_88E 512
-
-/*===================================================================
-=====================================================================
-Here the register defines are for 92C. When a define is the same as on 92C,
-we will use the 92C define for consistency.
-So the following defines for 92C are not complete!
-=====================================================================
-=====================================================================*/
-/*
-Based on Datasheet V33---090401
-Register Summary
-Current IOREG MAP
-0x0000h ~ 0x00FFh System Configuration (256 Bytes)
-0x0100h ~ 0x01FFh MACTOP General Configuration (256 Bytes)
-0x0200h ~ 0x027Fh TXDMA Configuration (128 Bytes)
-0x0280h ~ 0x02FFh RXDMA Configuration (128 Bytes)
-0x0300h ~ 0x03FFh PCIE EMAC Reserved Region (256 Bytes)
-0x0400h ~ 0x04FFh Protocol Configuration (256 Bytes)
-0x0500h ~ 0x05FFh EDCA Configuration (256 Bytes)
-0x0600h ~ 0x07FFh WMAC Configuration (512 Bytes)
-0x2000h ~ 0x3FFFh	8051 FW Download Region (8192 Bytes)
-*/
-/* 8192C (TXPAUSE) transmission pause (Offset 0x522, 8 bits) */
-/* Note: */
-/*	The bits for stopping the AC (VO/VI/BE/BK) queues in the
- *	RTL8192S/RTL8192C datasheets are wrong, */
-/*	the correct arrangement is VO - Bit0, VI - Bit1, BE - Bit2,
- *	and BK - Bit3. */
-/*	8723 and 88E may not be correct either in earlier versions. */
-#define StopBecon BIT(6)
-#define StopHigh BIT(5)
-#define StopMgt BIT(4)
-#define StopBK BIT(3)
-#define StopBE BIT(2)
-#define StopVI BIT(1)
-#define StopVO BIT(0)
-
-/* 8192C (RCR) Receive Configuration Register(Offset 0x608, 32 bits) */
-#define RCR_APPFCS BIT(31) /* WMAC append FCS after payload */
-#define RCR_APP_MIC BIT(30)
-#define RCR_APP_PHYSTS BIT(28)
-#define RCR_APP_ICV BIT(29)
-#define RCR_APP_PHYST_RXFF BIT(28)
-#define RCR_APP_BA_SSN BIT(27) /* Accept BA SSN */
-#define RCR_ENMBID BIT(24) /* Enable Multiple BssId. */
-#define RCR_LSIGEN BIT(23)
-#define RCR_MFBEN BIT(22)
-#define RCR_HTC_LOC_CTRL BIT(14) /* MFC<--HTC=1 MFC-->HTC=0 */
-#define RCR_AMF BIT(13) /* Accept management type frame */
-#define RCR_ACF BIT(12) /* Accept control type frame */
-#define RCR_ADF BIT(11) /* Accept data type frame */
-#define RCR_AICV BIT(9) /* Accept ICV error packet */
-#define RCR_ACRC32 BIT(8) /* Accept CRC32 error packet */
-#define RCR_CBSSID_BCN BIT(7) /* Accept BSSID match packet
- * (Rx beacon, probe rsp) */
-#define RCR_CBSSID_DATA BIT(6) /* Accept BSSID match (Data)*/
-#define RCR_CBSSID RCR_CBSSID_DATA /* Accept BSSID match */
-#define RCR_APWRMGT BIT(5) /* Accept power management pkt*/
-#define RCR_ADD3 BIT(4) /* Accept address 3 match pkt */
-#define RCR_AB BIT(3) /* Accept broadcast packet */
-#define RCR_AM BIT(2) /* Accept multicast packet */
-#define RCR_APM BIT(1) /* Accept physical match pkt */
-#define RCR_AAP BIT(0) /* Accept all unicast packet */
-#define RCR_MXDMA_OFFSET 8
-#define RCR_FIFO_OFFSET 13
-
-/* 0xFE00h ~ 0xFE55h USB Configuration */
-#define REG_USB_INFO 0xFE17
-#define REG_USB_SPECIAL_OPTION 0xFE55
-#define REG_USB_DMA_AGG_TO 0xFE5B
-#define REG_USB_AGG_TO 0xFE5C
-#define REG_USB_AGG_TH 0xFE5D
-
-#define REG_USB_HRPWM 0xFE58
-#define REG_USB_HCPWM 0xFE57
-/* 8192C Register Bit and Content definition */
-/* 0x0000h ~ 0x00FFh System Configuration */
-
-/* 2 SYS_ISO_CTRL */
-#define ISO_MD2PP BIT(0)
-#define ISO_UA2USB BIT(1)
-#define ISO_UD2CORE BIT(2)
-#define ISO_PA2PCIE BIT(3)
-#define ISO_PD2CORE BIT(4)
-#define ISO_IP2MAC BIT(5)
-#define ISO_DIOP BIT(6)
-#define ISO_DIOE BIT(7)
-#define ISO_EB2CORE BIT(8)
-#define ISO_DIOR BIT(9)
-#define PWC_EV12V BIT(15)
-
-/* 2 SYS_FUNC_EN */
-#define FEN_BBRSTB BIT(0)
-#define FEN_BB_GLB_RSTn BIT(1)
-#define FEN_USBA BIT(2)
-#define FEN_UPLL BIT(3)
-#define FEN_USBD BIT(4)
-#define FEN_DIO_PCIE BIT(5)
-#define FEN_PCIEA BIT(6)
-#define FEN_PPLL BIT(7)
-#define FEN_PCIED BIT(8)
-#define FEN_DIOE BIT(9)
-#define FEN_CPUEN BIT(10)
-#define FEN_DCORE BIT(11)
-#define FEN_ELDR BIT(12)
-#define FEN_DIO_RF BIT(13)
-#define FEN_HWPDN BIT(14)
-#define FEN_MREGEN BIT(15)
-
-/* 2 APS_FSMCO */
-#define PFM_LDALL BIT(0)
-#define PFM_ALDN BIT(1)
-#define PFM_LDKP BIT(2)
-#define PFM_WOWL BIT(3)
-#define EnPDN BIT(4)
-#define PDN_PL BIT(5)
-#define APFM_ONMAC BIT(8)
-#define APFM_OFF BIT(9)
-#define APFM_RSM BIT(10)
-#define AFSM_HSUS BIT(11)
-#define AFSM_PCIE BIT(12)
-#define APDM_MAC BIT(13)
-#define APDM_HOST BIT(14)
-#define APDM_HPDN BIT(15)
-#define RDY_MACON BIT(16)
-#define SUS_HOST BIT(17)
-#define ROP_ALD BIT(20)
-#define ROP_PWR BIT(21)
-#define ROP_SPS BIT(22)
-#define SOP_MRST BIT(25)
-#define SOP_FUSE BIT(26)
-#define SOP_ABG BIT(27)
-#define SOP_AMB BIT(28)
-#define SOP_RCK BIT(29)
-#define SOP_A8M BIT(30)
-#define XOP_BTCK BIT(31)
-
-/* 2 SYS_CLKR */
-#define ANAD16V_EN BIT(0)
-#define ANA8M BIT(1)
-#define MACSLP BIT(4)
-#define LOADER_CLK_EN BIT(5)
-
-/* 2 9346CR */
-
-#define BOOT_FROM_EEPROM BIT(4)
-#define EEPROM_EN BIT(5)
-
-/* 2 SPS0_CTRL */
-
-/* 2 SPS_OCP_CFG */
-
-/* 2 RF_CTRL */
-#define RF_EN BIT(0)
-#define RF_RSTB BIT(1)
-#define RF_SDMRSTB BIT(2)
-
-/* 2 LDOV12D_CTRL */
-#define LDV12_EN BIT(0)
-#define LDV12_SDBY BIT(1)
-#define LPLDO_HSM BIT(2)
-#define LPLDO_LSM_DIS BIT(3)
-#define _LDV12_VADJ(x) (((x) & 0xF) << 4)
-
-/* 2EFUSE_CTRL */
-#define ALD_EN BIT(18)
-#define EF_PD BIT(19)
-#define EF_FLAG BIT(31)
-
-/* 2 EFUSE_TEST (For RTL8723 partially) */
-#define EF_TRPT BIT(7)
-/* 00: Wifi Efuse, 01: BT Efuse0, 10: BT Efuse1, 11: BT Efuse2 */
-#define EF_CELL_SEL (BIT(8)|BIT(9))
-#define LDOE25_EN BIT(31)
-#define EFUSE_SEL(x) (((x) & 0x3) << 8)
-#define EFUSE_SEL_MASK 0x300
-#define EFUSE_WIFI_SEL_0 0x0
-#define EFUSE_BT_SEL_0 0x1
-#define EFUSE_BT_SEL_1 0x2
-#define EFUSE_BT_SEL_2 0x3
-
-#define EFUSE_ACCESS_ON 0x69 /* For RTL8723 only. */
-#define EFUSE_ACCESS_OFF 0x00 /* For RTL8723 only. */
-
-/* 2 8051FWDL */
-/* 2 MCUFWDL */
-#define MCUFWDL_EN BIT(0)
-#define MCUFWDL_RDY BIT(1)
-#define FWDL_CHKSUM_RPT BIT(2)
-#define MACINI_RDY BIT(3)
-#define BBINI_RDY BIT(4)
-#define RFINI_RDY BIT(5)
-#define WINTINI_RDY BIT(6)
-#define RAM_DL_SEL BIT(7) /* 1:RAM, 0:ROM */
-#define ROM_DLEN BIT(19)
-#define CPRST BIT(23)
-
-/* 2 REG_SYS_CFG */
-#define XCLK_VLD BIT(0)
-#define ACLK_VLD BIT(1)
-#define UCLK_VLD BIT(2)
-#define PCLK_VLD BIT(3)
-#define PCIRSTB BIT(4)
-#define V15_VLD BIT(5)
-#define SW_OFFLOAD_EN BIT(7)
-#define SIC_IDLE BIT(8)
-#define BD_MAC2 BIT(9)
-#define BD_MAC1 BIT(10)
-#define IC_MACPHY_MODE BIT(11)
-#define CHIP_VER (BIT(12)|BIT(13)|BIT(14)|BIT(15))
-#define BT_FUNC BIT(16)
-#define VENDOR_ID BIT(19)
-#define PAD_HWPD_IDN BIT(22)
-#define TRP_VAUX_EN BIT(23) /* RTL ID */
-#define TRP_BT_EN BIT(24)
-#define BD_PKG_SEL BIT(25)
-#define BD_HCI_SEL BIT(26)
-#define TYPE_ID BIT(27)
-
-#define CHIP_VER_RTL_MASK 0xF000 /* Bit 12 ~ 15 */
-#define CHIP_VER_RTL_SHIFT 12
-
-/* 2REG_GPIO_OUTSTS (For RTL8723 only) */
-#define EFS_HCI_SEL (BIT(0)|BIT(1))
-#define PAD_HCI_SEL (BIT(2)|BIT(3))
-#define HCI_SEL (BIT(4)|BIT(5))
-#define PKG_SEL_HCI BIT(6)
-#define FEN_GPS BIT(7)
-#define FEN_BT BIT(8)
-#define FEN_WL BIT(9)
-#define FEN_PCI BIT(10)
-#define FEN_USB BIT(11)
-#define BTRF_HWPDN_N BIT(12)
-#define WLRF_HWPDN_N BIT(13)
-#define PDN_BT_N BIT(14)
-#define PDN_GPS_N BIT(15)
-#define BT_CTL_HWPDN BIT(16)
-#define GPS_CTL_HWPDN BIT(17)
-#define PPHY_SUSB BIT(20)
-#define UPHY_SUSB BIT(21)
-#define PCI_SUSEN BIT(22)
-#define USB_SUSEN BIT(23)
-#define RF_RL_ID (BIT(31)|BIT(30)|BIT(29)|BIT(28))
-
-/* 2SYS_CFG */
-#define RTL_ID BIT(23) /* TestChip ID, 1:Test(RLE); 0:MP(RL) */
-
-/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
-
-/* 2 Function Enable Registers */
-/* 2 CR */
-
-#define HCI_TXDMA_EN BIT(0)
-#define HCI_RXDMA_EN BIT(1)
-#define TXDMA_EN BIT(2)
-#define RXDMA_EN BIT(3)
-#define PROTOCOL_EN BIT(4)
-#define SCHEDULE_EN BIT(5)
-#define MACTXEN BIT(6)
-#define MACRXEN BIT(7)
-#define ENSWBCN BIT(8)
-#define ENSEC BIT(9)
-#define CALTMR_EN BIT(10) /* 32k CAL TMR enable */
-
-/* Network type */
-#define _NETTYPE(x) (((x) & 0x3) << 16)
-#define MASK_NETTYPE 0x30000
-#define NT_NO_LINK 0x0
-#define NT_LINK_AD_HOC 0x1
-#define NT_LINK_AP 0x2
-#define NT_AS_AP 0x3
-
-/* 2 PBP - Page Size Register */
-#define GET_RX_PAGE_SIZE(value) ((value) & 0xF)
-#define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4)
-#define _PSRX_MASK 0xF
-#define _PSTX_MASK 0xF0
-#define _PSRX(x) (x)
-#define _PSTX(x) ((x) << 4)
-
-#define PBP_128 0x1
-
-/* 2 TX/RXDMA */
-#define RXDMA_ARBBW_EN BIT(0)
-#define RXSHFT_EN BIT(1)
-#define RXDMA_AGG_EN BIT(2)
-#define QS_VO_QUEUE BIT(8)
-#define QS_VI_QUEUE BIT(9)
-#define QS_BE_QUEUE BIT(10)
-#define QS_BK_QUEUE BIT(11)
-#define QS_MANAGER_QUEUE BIT(12)
-#define QS_HIGH_QUEUE BIT(13)
-
-#define HQSEL_VOQ BIT(0)
-#define HQSEL_VIQ BIT(1)
-#define HQSEL_BEQ BIT(2)
-#define HQSEL_BKQ BIT(3)
-#define HQSEL_MGTQ BIT(4)
-#define HQSEL_HIQ BIT(5)
-
-/* For normal driver, 0x10C */
-#define _TXDMA_HIQ_MAP(x) (((x) & 0x3) << 14)
-#define _TXDMA_MGQ_MAP(x) (((x) & 0x3) << 12)
-#define _TXDMA_BKQ_MAP(x) (((x) & 0x3) << 10)
-#define _TXDMA_BEQ_MAP(x) (((x) & 0x3) << 8)
-#define _TXDMA_VIQ_MAP(x) (((x) & 0x3) << 6)
-#define _TXDMA_VOQ_MAP(x) (((x) & 0x3) << 4)
-
-#define QUEUE_LOW 1
-#define QUEUE_NORMAL 2
-#define QUEUE_HIGH 3
-
-/* 2 TRXFF_BNDY */
-
-/* 2 LLT_INIT */
-#define _LLT_NO_ACTIVE 0x0
-#define _LLT_WRITE_ACCESS 0x1
-#define _LLT_READ_ACCESS 0x2
-
-#define _LLT_INIT_DATA(x) ((x) & 0xFF)
-#define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8)
-#define _LLT_OP(x) (((x) & 0x3) << 30)
-#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3)
-
-/* 0x0200h ~ 0x027Fh TXDMA Configuration */
-
-#define NUM_HQ 0x29
-
-#define LD_RQPN BIT(31)
-
-/* 2TDECTRL */
-#define BCN_VALID BIT(16)
-#define BCN_HEAD(x) (((x) & 0xFF) << 8)
-#define BCN_HEAD_MASK 0xFF00
-
-/* 2 TDECTL */
-#define BLK_DESC_NUM_SHIFT 4
-#define BLK_DESC_NUM_MASK 0xF
-
-/* 2 TXDMA_OFFSET_CHK */
-#define DROP_DATA_EN BIT(9)
-
-/* 0x0280h ~ 0x028Bh RX DMA Configuration */
-
-/* REG_RXDMA_CONTROL, 0x0286h */
-
-/* 2 REG_RXPKT_NUM, 0x0284 */
-#define RXPKT_RELEASE_POLL BIT(16)
-#define RXDMA_IDLE BIT(17)
-#define RW_RELEASE_EN BIT(18)
-
-/* 0x0400h ~ 0x047Fh Protocol Configuration */
-/* 2 FWHW_TXQ_CTRL */
-#define EN_AMPDU_RTY_NEW BIT(7)
-
-/* 2 SPEC SIFS */
-#define _SPEC_SIFS_CCK(x) ((x) & 0xFF)
-#define _SPEC_SIFS_OFDM(x) (((x) & 0xFF) << 8)
-
-/* 2 RL */
-#define RETRY_LIMIT_SHORT_SHIFT 8
-#define RETRY_LIMIT_LONG_SHIFT 0
-
-/* 0x0500h ~ 0x05FFh EDCA Configuration */
-
-/* 2 EDCA setting */
-#define AC_PARAM_TXOP_LIMIT_OFFSET 16
-#define AC_PARAM_ECW_MAX_OFFSET 12
-#define AC_PARAM_ECW_MIN_OFFSET 8
-#define AC_PARAM_AIFS_OFFSET 0
-
-#define _LRL(x) ((x) & 0x3F)
-#define _SRL(x) (((x) & 0x3F) << 8)
-
-/* 2 BCN_CTRL */
-#define EN_MBSSID BIT(1)
-#define EN_TXBCN_RPT BIT(2)
-#define EN_BCN_FUNCTION BIT(3)
-#define DIS_TSF_UPDATE BIT(3)
-
-/* The same function but different bit field. */
-#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4)
-#define DIS_TSF_UDT0_TEST_CHIP BIT(5)
-#define STOP_BCNQ BIT(6)
-
-/* 2 ACMHWCTRL */
-#define ACMHW_BEQEN BIT(1)
-#define ACMHW_VIQEN BIT(2)
-#define ACMHW_VOQEN BIT(3)
-
-/* 0x0600h ~ 0x07FFh WMAC Configuration */
-/* 2APSD_CTRL */
-#define APSDOFF BIT(6)
-#define APSDOFF_STATUS BIT(7)
-
-#define RATE_BITMAP_ALL 0xFFFFF
-
-/* Only use CCK 1M rate for ACK */
-#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1
-
-/* 2 TCR */
-#define TSFRST BIT(0)
-#define DIS_GCLK BIT(1)
-#define PAD_SEL BIT(2)
-#define PWR_ST BIT(6)
-#define PWRBIT_OW_EN BIT(7)
-#define ACRC BIT(8)
-#define CFENDFORM BIT(9)
-#define ICV BIT(10)
-
-/* 2 RCR */
-#define AAP BIT(0)
-#define APM BIT(1)
-#define AM BIT(2)
-#define AB BIT(3)
-#define ADD3 BIT(4)
-#define APWRMGT BIT(5)
-#define CBSSID BIT(6)
-#define CBSSID_DATA BIT(6)
-#define CBSSID_BCN BIT(7)
-#define ACRC32 BIT(8)
-#define AICV BIT(9)
-#define ADF BIT(11)
-#define ACF BIT(12)
-#define AMF BIT(13)
-#define HTC_LOC_CTRL BIT(14)
-#define UC_DATA_EN BIT(16)
-#define BM_DATA_EN BIT(17)
-#define MFBEN BIT(22)
-#define LSIGEN BIT(23)
-#define EnMBID BIT(24)
-#define APP_BASSN BIT(27)
-#define APP_PHYSTS BIT(28)
-#define APP_ICV BIT(29)
-#define APP_MIC BIT(30)
-#define APP_FCS BIT(31)
-
-/* 2 SECCFG */
-#define SCR_TxUseDK BIT(0) /* Force Tx Use Default Key */
-#define SCR_RxUseDK BIT(1) /* Force Rx Use Default Key */
-#define SCR_TxEncEnable BIT(2) /* Enable Tx Encryption */
-#define SCR_RxDecEnable BIT(3) /* Enable Rx Decryption */
-#define SCR_SKByA2			BIT(4)	/* Search key by A2 */
-#define SCR_NoSKMC BIT(5) /* No Key Search Multicast */
-#define SCR_TXBCUSEDK BIT(6) /* Force Tx Bcast pkt Use Default Key */
-#define SCR_RXBCUSEDK BIT(7) /* Force Rx Bcast pkt Use Default Key */
-
-/* 0xFE00h ~ 0xFE55h USB Configuration */
-
-/* 2 USB Information (0xFE17) */
-#define USB_IS_HIGH_SPEED 0
-#define USB_IS_FULL_SPEED 1
-#define USB_SPEED_MASK BIT(5)
-
-#define USB_NORMAL_SIE_EP_MASK 0xF
-#define USB_NORMAL_SIE_EP_SHIFT 4
-
-/* 2 Special Option */
-#define USB_AGG_EN BIT(3)
-
-/*  0: Use interrupt endpoint to upload interrupt pkt */
-/*  1: Use bulk endpoint to upload interrupt pkt */
-#define INT_BULK_SEL BIT(4)
-
-/* 2REG_C2HEVT_CLEAR */
-/*	Set by the driver to notify the FW that the driver has read
- *	the C2H command message */
-#define C2H_EVT_HOST_CLOSE 0x00
-/*	Set by the FW to indicate that the FW has posted a C2H command
- *	message which the driver has not yet read. */
-#define C2H_EVT_FW_CLOSE 0xFF
-
-/* 2REG_MULTI_FUNC_CTRL(For RTL8723 Only) */
-/* Enable GPIO[9] as WiFi HW PDn source */
-#define WL_HWPDN_EN BIT(0)
-/* WiFi HW PDn polarity control */
-#define WL_HWPDN_SL BIT(1)
-/* WiFi function enable */
-#define WL_FUNC_EN BIT(2)
-/* Enable GPIO[9] as WiFi RF HW PDn source */
-#define WL_HWROF_EN BIT(3)
-/* Enable GPIO[11] as BT HW PDn source */
-#define BT_HWPDN_EN BIT(16)
-/* BT HW PDn polarity control */
-#define BT_HWPDN_SL BIT(17)
-/* BT function enable */
-#define BT_FUNC_EN BIT(18)
-/* Enable GPIO[11] as BT/GPS RF HW PDn source */
-#define BT_HWROF_EN BIT(19)
-/* Enable GPIO[10] as GPS HW PDn source */
-#define GPS_HWPDN_EN BIT(20)
-/* GPS HW PDn polarity control */
-#define GPS_HWPDN_SL BIT(21)
-/* GPS function enable */
-#define GPS_FUNC_EN BIT(22)
-
-/* 3 REG_LIFECTRL_CTRL */
-#define HAL92C_EN_PKT_LIFE_TIME_BK BIT(3)
-#define HAL92C_EN_PKT_LIFE_TIME_BE BIT(2)
-#define HAL92C_EN_PKT_LIFE_TIME_VI BIT(1)
-#define HAL92C_EN_PKT_LIFE_TIME_VO BIT(0)
-
-#define HAL92C_MSDU_LIFE_TIME_UNIT 128 /* in us */
-
-/* General definitions */
-#define LAST_ENTRY_OF_TX_PKT_BUFFER 176 /* 22k 22528 bytes */
-
-#define POLLING_LLT_THRESHOLD 20
-#define POLLING_READY_TIMEOUT_COUNT 1000
-/* GPIO BIT */
-#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2)
-
-/* 8192C EEPROM/EFUSE share register definition. */
-
-/* EEPROM/Efuse PG Offset for 88EE/88EU/88ES */
-#define EEPROM_TX_PWR_INX_88E 0x10
-
-#define EEPROM_ChannelPlan_88E 0xB8
-#define EEPROM_XTAL_88E 0xB9
-#define EEPROM_THERMAL_METER_88E 0xBA
-#define EEPROM_IQK_LCK_88E 0xBB
-
-#define EEPROM_RF_BOARD_OPTION_88E 0xC1
-#define EEPROM_RF_FEATURE_OPTION_88E 0xC2
-#define EEPROM_RF_ANTENNA_OPT_88E 0xC9
-
-/* RTL88EU */
-#define EEPROM_MAC_ADDR_88EU 0xD7
-#define EEPROM_USB_OPTIONAL_FUNCTION0 0xD4
-
-/* RTL88ES */
-#define EEPROM_MAC_ADDR_88ES 0x11A
-
-#define EEPROM_Default_CrystalCap_88E 0x20
-#define EEPROM_Default_ThermalMeter_88E 0x18
-
-/* New EFUSE default value */
-#define EEPROM_DEFAULT_24G_INDEX 0x2D
-#define EEPROM_DEFAULT_24G_HT20_DIFF 0X02
-#define EEPROM_DEFAULT_24G_OFDM_DIFF 0X04
-
-#define EEPROM_DEFAULT_DIFF 0XFE
-#define EEPROM_DEFAULT_BOARD_OPTION 0x00
-
-#define EEPROM_CHANNEL_PLAN_FCC 0x0
-#define EEPROM_CHANNEL_PLAN_IC 0x1
-#define EEPROM_CHANNEL_PLAN_ETSI 0x2
-#define EEPROM_CHANNEL_PLAN_SPA 0x3
-#define EEPROM_CHANNEL_PLAN_FRANCE 0x4
-#define EEPROM_CHANNEL_PLAN_MKK 0x5
-#define EEPROM_CHANNEL_PLAN_MKK1 0x6
-#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7
-#define EEPROM_CHANNEL_PLAN_TELEC 0x8
-#define EEPROM_CHANNEL_PLAN_GLOBAL_DOMA 0x9
-#define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA
-#define EEPROM_CHANNEL_PLAN_NCC 0xB
-#define EEPROM_USB_OPTIONAL1 0xE
-#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80
-
-#define RTL_EEPROM_ID 0x8129
-
-#endif /* __RTL8188E_SPEC_H__ */
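
Among the defines above, the link-list-table helpers (_LLT_INIT_ADDR, _LLT_INIT_DATA,
_LLT_OP and friends) compose the 32-bit command word written to REG_LLT_INIT: the data
byte sits in bits 7..0, the entry address in bits 15..8 and the operation code in bits
31..30. A hedged sketch of one entry write, with the completion polling only hinted at;
the entry/data values are illustrative and rtw_write32()/rtw_read32() are assumed to be
the driver's register accessors:

	u32 cmd = _LLT_INIT_ADDR(0x10) | _LLT_INIT_DATA(0x5A) |
		  _LLT_OP(_LLT_WRITE_ACCESS);

	rtw_write32(padapter, REG_LLT_INIT, cmd);
	/* then poll until _LLT_OP_VALUE(rtw_read32(padapter, REG_LLT_INIT))
	 * returns _LLT_NO_ACTIVE */
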
diff --git a/drivers/staging/r8188eu/include/rtl8188e_xmit.h b/drivers/staging/r8188eu/include/rtl8188e_xmit.h
deleted file mode 100644
index a023dd792da7..000000000000
--- a/drivers/staging/r8188eu/include/rtl8188e_xmit.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTL8188E_XMIT_H__
-#define __RTL8188E_XMIT_H__
-
-#define MAX_TX_AGG_PACKET_NUMBER 0xFF
-#define QSLT_MGNT 0x12
-
-/* For 88e early mode */
-#define SET_EARLYMODE_PKTNUM(__paddr, __value) \
- le32p_replace_bits((__le32 *)__paddr, __value, GENMASK(2, 0))
-#define SET_EARLYMODE_LEN0(__paddr, __value) \
-	le32p_replace_bits((__le32 *)__paddr, __value, GENMASK(15, 4))
-#define SET_EARLYMODE_LEN1(__paddr, __value) \
-	le32p_replace_bits((__le32 *)__paddr, __value, GENMASK(27, 16))
-#define SET_EARLYMODE_LEN2_1(__paddr, __value) \
-	le32p_replace_bits((__le32 *)__paddr, __value, GENMASK(31, 28))
-#define SET_EARLYMODE_LEN2_2(__paddr, __value) \
-	le32p_replace_bits((__le32 *)(__paddr + 4), __value, GENMASK(7, 0))
-#define SET_EARLYMODE_LEN3(__paddr, __value) \
-	le32p_replace_bits((__le32 *)(__paddr + 4), __value, GENMASK(19, 8))
-#define SET_EARLYMODE_LEN4(__paddr, __value) \
-	le32p_replace_bits((__le32 *)(__paddr + 4), __value, GENMASK(31, 20))
-
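A minimal usage sketch, not part of the original header: the early-mode area is treated as two consecutive little-endian dwords, which is why the LEN2_2..LEN4 accessors add 4 to the base pointer. The helper name, buffer size and frame lengths below are illustrative assumptions only.

static void sketch_fill_early_mode_hdr(u8 *em_hdr)
{
	/* em_hdr is assumed to point at the 8-byte early-mode area */
	SET_EARLYMODE_PKTNUM(em_hdr, 3);	/* three frames aggregated */
	SET_EARLYMODE_LEN0(em_hdr, 0x3c0);	/* length of frame 0 */
	SET_EARLYMODE_LEN1(em_hdr, 0x2a8);	/* length of frame 1 */
	/* frame 2's length is split across the two dwords via LEN2_1/LEN2_2 */
}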
-/* defined for TX DESC Operation */
-
-#define MAX_TID (15)
-
-/* OFFSET 0 */
-#define OFFSET_SZ 0
-#define OFFSET_SHT 16
-#define BMC BIT(24)
-#define LSG BIT(26)
-#define FSG BIT(27)
-#define OWN BIT(31)
-
-/* OFFSET 4 */
-#define PKT_OFFSET_SZ 0
-#define QSEL_SHT 8
-#define RATE_ID_SHT 16
-#define NAVUSEHDR BIT(20)
-#define SEC_TYPE_SHT 22
-#define PKT_OFFSET_SHT 26
-
-/* OFFSET 8 */
-#define AGG_EN BIT(12)
-#define AGG_BK BIT(16)
-#define AMPDU_DENSITY_SHT 20
-#define ANTSEL_A BIT(24)
-#define ANTSEL_B BIT(25)
-#define TX_ANT_CCK_SHT 26
-#define TX_ANTL_SHT 28
-#define TX_ANT_HT_SHT 30
-
-/* OFFSET 12 */
-#define SEQ_SHT 16
-#define EN_HWSEQ BIT(31)
-
-/* OFFSET 16 */
-#define QOS BIT(6)
-#define HW_SSN BIT(7)
-#define USERATE BIT(8)
-#define DISDATAFB BIT(10)
-#define CTS_2_SELF BIT(11)
-#define RTS_EN BIT(12)
-#define HW_RTS_EN BIT(13)
-#define DATA_SHORT BIT(24)
-#define PWR_STATUS_SHT 15
-#define DATA_SC_SHT 20
-#define DATA_BW BIT(25)
-
-/* OFFSET 20 */
-#define RTY_LMT_EN BIT(17)
-
-/* OFFSET 20 */
-#define SGI BIT(6)
-#define USB_TXAGG_NUM_SHT 24
-
-#define USB_TXAGG_DESC_NUM 0x6
-
-#define txdesc_set_ccx_sw_88e(txdesc, value) \
- do { \
- ((struct txdesc_88e *)(txdesc))->sw1 = (((value)>>8) & 0x0f); \
- ((struct txdesc_88e *)(txdesc))->sw0 = ((value) & 0xff); \
- } while (0)
-
-struct txrpt_ccx_88e {
- /* offset 0 */
- u8 tag1:1;
- u8 pkt_num:3;
- u8 txdma_underflow:1;
- u8 int_bt:1;
- u8 int_tri:1;
- u8 int_ccx:1;
-
- /* offset 1 */
- u8 mac_id:6;
- u8 pkt_ok:1;
- u8 bmc:1;
-
- /* offset 2 */
- u8 retry_cnt:6;
- u8 lifetime_over:1;
- u8 retry_over:1;
-
- /* offset 3 */
- u8 ccx_qtime0;
- u8 ccx_qtime1;
-
- /* offset 5 */
- u8 final_data_rate;
-
- /* offset 6 */
- u8 sw1:4;
- u8 qsel:4;
-
- /* offset 7 */
- u8 sw0;
-};
-
-void rtl8188e_fill_fake_txdesc(struct adapter *padapter, u8 *pDesc,
- u32 BufferLen, u8 IsPsPoll, u8 IsBTQosNull);
-s32 rtl8188eu_hal_xmit(struct adapter *padapter, struct xmit_frame *frame);
-s32 rtl8188eu_mgnt_xmit(struct adapter *padapter, struct xmit_frame *frame);
-s32 rtl8188eu_xmit_buf_handler(struct adapter *padapter);
-void rtl8188eu_xmit_tasklet(unsigned long priv);
-bool rtl8188eu_xmitframe_complete(struct adapter *padapter);
-
-#endif /* __RTL8188E_XMIT_H__ */
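The sw0/sw1 pair written by txdesc_set_ccx_sw_88e() is echoed back in struct txrpt_ccx_88e above; a hypothetical helper, not in the original header, to recombine it on the report path could look like this:

static u16 sketch_txrpt_ccx_sw_88e(const struct txrpt_ccx_88e *rpt)
{
	/* txdesc_set_ccx_sw_88e() stored bits 11:8 in sw1 and bits 7:0 in sw0 */
	return (rpt->sw1 << 8) | rpt->sw0;
}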
diff --git a/drivers/staging/r8188eu/include/rtw_ap.h b/drivers/staging/r8188eu/include/rtw_ap.h
deleted file mode 100644
index 89b02c97e041..000000000000
--- a/drivers/staging/r8188eu/include/rtw_ap.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#ifndef __RTW_AP_H_
-#define __RTW_AP_H_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-/* external function */
-void rtw_indicate_sta_assoc_event(struct adapter *padapter,
- struct sta_info *psta);
-void init_mlme_ap_info(struct adapter *padapter);
-void free_mlme_ap_info(struct adapter *padapter);
-void update_beacon(struct adapter *padapter, u8 ie_id,
- u8 *oui, u8 tx);
-void add_RATid(struct adapter *padapter, struct sta_info *psta,
- u8 rssi_level);
-void expire_timeout_chk(struct adapter *padapter);
-void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta);
-void rtw_ap_restore_network(struct adapter *padapter);
-
-void associated_clients_update(struct adapter *padapter, u8 updated);
-void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta);
-u8 bss_cap_update_on_sta_leave(struct adapter *padapter, struct sta_info *psta);
-void sta_info_update(struct adapter *padapter, struct sta_info *psta);
-u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta,
- bool active, u16 reason);
-void rtw_sta_flush(struct adapter *padapter);
-void start_ap_mode(struct adapter *padapter);
-void stop_ap_mode(struct adapter *padapter);
-void update_bmc_sta(struct adapter *padapter);
-
-#endif
diff --git a/drivers/staging/r8188eu/include/rtw_br_ext.h b/drivers/staging/r8188eu/include/rtw_br_ext.h
deleted file mode 100644
index 56772af3bec5..000000000000
--- a/drivers/staging/r8188eu/include/rtw_br_ext.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef _RTW_BR_EXT_H_
-#define _RTW_BR_EXT_H_
-
-#define GET_MY_HWADDR(padapter) ((padapter)->eeprompriv.mac_addr)
-
-#define NAT25_HASH_BITS 4
-#define NAT25_HASH_SIZE (1 << NAT25_HASH_BITS)
-#define NAT25_AGEING_TIME 300
-
-#define MAX_NETWORK_ADDR_LEN 17
-
-struct nat25_network_db_entry {
- struct nat25_network_db_entry *next_hash;
- struct nat25_network_db_entry **pprev_hash;
- atomic_t use_count;
- unsigned char macAddr[6];
- unsigned long ageing_timer;
- unsigned char networkAddr[MAX_NETWORK_ADDR_LEN];
-};
-
-enum NAT25_METHOD {
- NAT25_MIN,
- NAT25_CHECK,
- NAT25_INSERT,
- NAT25_PARSE,
- NAT25_MAX
-};
-
-struct br_ext_info {
- unsigned int nat25_disable;
- unsigned int macclone_enable;
- unsigned int dhcp_bcst_disable;
- int addPPPoETag; /* 1: Add PPPoE relay-SID, 0: disable */
- unsigned char nat25_dmzMac[ETH_ALEN];
- unsigned int nat25sc_disable;
-};
-
-void nat25_db_cleanup(struct adapter *priv);
-
-#endif /* _RTW_BR_EXT_H_ */
diff --git a/drivers/staging/r8188eu/include/rtw_cmd.h b/drivers/staging/r8188eu/include/rtw_cmd.h
deleted file mode 100644
index e8eecd52d1d8..000000000000
--- a/drivers/staging/r8188eu/include/rtw_cmd.h
+++ /dev/null
@@ -1,925 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTW_CMD_H_
-#define __RTW_CMD_H_
-
-#include "wlan_bssdef.h"
-#include "rtw_rf.h"
-
-#include "osdep_service.h"
-#include "ieee80211.h" /* <ieee80211/ieee80211.h> */
-
-#define MAX_CMDSZ 1024
-#define MAX_RSPSZ 512
-#define MAX_EVTSZ 1024
-
-#define CMDBUFF_ALIGN_SZ 512
-
-struct cmd_obj {
- struct adapter *padapter;
- u16 cmdcode;
- u8 res;
- u8 *parmbuf;
- u32 cmdsz;
- u8 *rsp;
- u32 rspsz;
- struct list_head list;
-};
-
-struct cmd_priv {
- struct completion enqueue_cmd;
- struct completion start_cmd_thread;
- struct completion stop_cmd_thread;
- struct __queue cmd_queue;
- u8 *cmd_buf; /* shall be non-paged, and 4 bytes aligned */
- u8 *cmd_allocated_buf;
- u8 *rsp_buf; /* shall be non-paged, and 4 bytes aligned */
- u8 *rsp_allocated_buf;
- u32 cmd_done_cnt;
- u32 rsp_cnt;
- u8 cmdthd_running;
- struct adapter *padapter;
-};
-
-struct evt_priv {
- struct work_struct c2h_wk;
- bool c2h_wk_alive;
- struct rtw_cbuf *c2h_queue;
- #define C2H_QUEUE_MAX_LEN 10
- atomic_t event_seq;
- u8 *evt_buf; /* shall be non-paged, and 4 bytes aligned */
-};
-
-#define init_h2fwcmd_w_parm_no_rsp(pcmd, pparm, code) \
-do {\
- INIT_LIST_HEAD(&pcmd->list);\
- pcmd->cmdcode = code;\
- pcmd->parmbuf = (u8 *)(pparm);\
- pcmd->cmdsz = sizeof(*pparm);\
- pcmd->rsp = NULL;\
- pcmd->rspsz = 0;\
-} while (0)
-
-struct c2h_evt_hdr {
- u8 id:4;
- u8 plen:4;
- u8 seq;
- u8 payload[];
-};
-
-#define c2h_evt_exist(c2h_evt) ((c2h_evt)->id || (c2h_evt)->plen)
-
-u32 rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *obj);
-struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv);
-void rtw_free_cmd_obj(struct cmd_obj *pcmd);
-
-int rtw_cmd_thread(void *context);
-
-int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv);
-void rtw_free_cmd_priv(struct cmd_priv *pcmdpriv);
-
-int rtw_init_evt_priv(struct evt_priv *pevtpriv);
-void rtw_free_evt_priv(struct evt_priv *pevtpriv);
-void rtw_evt_notify_isr(struct evt_priv *pevtpriv);
-u8 p2p_protocol_wk_cmd(struct adapter *padapter, int intCmdType);
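A rough sketch of how a no-response command is typically assembled with init_h2fwcmd_w_parm_no_rsp() and handed to the command thread via rtw_enqueue_cmd(). The helper name is hypothetical; struct disconnect_parm and GEN_CMD_CODE(_DisConnect) are defined further down in this header, while the adapter's cmdpriv member and the _FAIL status code are assumed from elsewhere in the driver.

static u32 sketch_issue_disconnect(struct adapter *padapter, u32 timeout_ms)
{
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;	/* assumed member */
	struct disconnect_parm *parm;
	struct cmd_obj *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	parm = kzalloc(sizeof(*parm), GFP_KERNEL);
	if (!cmd || !parm) {
		kfree(cmd);
		kfree(parm);
		return _FAIL;	/* assumed status code */
	}

	parm->deauth_timeout_ms = timeout_ms;
	init_h2fwcmd_w_parm_no_rsp(cmd, parm, GEN_CMD_CODE(_DisConnect));
	return rtw_enqueue_cmd(pcmdpriv, cmd);
}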
-
-enum rtw_drvextra_cmd_id {
- NONE_WK_CID,
- DYNAMIC_CHK_WK_CID,
- DM_CTRL_WK_CID,
- PBC_POLLING_WK_CID,
- POWER_SAVING_CTRL_WK_CID,/* IPS,AUTOSuspend */
- LPS_CTRL_WK_CID,
- ANT_SELECT_WK_CID,
- P2P_PS_WK_CID,
- P2P_PROTO_WK_CID,
- CHECK_HIQ_WK_CID,/* for softap mode, check hi queue if empty */
- INTEl_WIDI_WK_CID,
- C2H_WK_CID,
- RTP_TIMER_CFG_WK_CID,
- MAX_WK_CID
-};
-
-enum LPS_CTRL_TYPE {
- LPS_CTRL_SCAN = 0,
- LPS_CTRL_JOINBSS = 1,
- LPS_CTRL_CONNECT = 2,
- LPS_CTRL_DISCONNECT = 3,
- LPS_CTRL_SPECIAL_PACKET = 4,
- LPS_CTRL_LEAVE = 5,
-};
-
-enum RFINTFS {
- SWSI,
- HWSI,
- HWPI,
-};
-
-/*
-Caller Mode: Infra, Ad-HoC
-
-Notes: To join a known BSS.
-
-Command-Event Mode
-
-*/
-
-/*
-Caller Mode: Infra, Ad-Hoc
-
-Notes: To join the specified bss
-
-Command Event Mode
-
-*/
-struct joinbss_parm {
- struct wlan_bssid_ex network;
-};
-
-/*
-Caller Mode: Infra, Ad-HoC(C)
-
-Notes: To disconnect the current associated BSS
-
-Command Mode
-
-*/
-struct disconnect_parm {
- u32 deauth_timeout_ms;
-};
-
-/*
-Caller Mode: AP, Ad-HoC(M)
-
-Notes: To create a BSS
-
-Command Mode
-*/
-struct createbss_parm {
- struct wlan_bssid_ex network;
-};
-
-struct setopmode_parm {
- u8 mode;
- u8 rsvd[3];
-};
-
-/*
-Caller Mode: AP, Ad-HoC, Infra
-
-Notes: To ask RTL8711 to perform a site survey
-
-Command-Event Mode
-
-*/
-
-#define RTW_SSID_SCAN_AMOUNT 9 /* for WEXT_CSCAN_AMOUNT 9 */
-#define RTW_CHANNEL_SCAN_AMOUNT (14+37)
-struct sitesurvey_parm {
- int scan_mode; /* active: 1, passive: 0 */
- u8 ssid_num;
- u8 ch_num;
- struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT];
- struct rtw_ieee80211_channel ch[RTW_CHANNEL_SCAN_AMOUNT];
-};
-
-/*
-Caller Mode: Any
-
-Notes: To set the auth type of RTL8711. open/shared/802.1x
-
-Command Mode
-
-*/
-struct setauth_parm {
- u8 mode; /* 0: legacy open, 1: legacy shared 2: 802.1x */
- u8 _1x; /* 0: PSK, 1: TLS */
- u8 rsvd[2];
-};
-
-/*
-Caller Mode: Infra
-
-a. algorithm: wep40, wep104, tkip & aes
-b. keytype: grp key/unicast key
-c. key contents
-
-when shared key ==> keyid is the camid
-when 802.1x ==> keyid [0:1] ==> grp key
-when 802.1x ==> keyid > 2 ==> unicast key
-
-*/
-struct setkey_parm {
- u8 algorithm; /* could be none, wep40, TKIP, CCMP, wep104 */
- u8 keyid;
- u8 grpkey; /* 1: this is the grpkey for 802.1x.
- * 0: this is the unicast key for 802.1x */
- u8 set_tx; /* 1: main tx key for wep. 0: other key. */
- u8 key[16]; /* this could be 40 or 104 */
-};
-
-/*
-When in AP or Ad-Hoc mode, this is used to
-allocate an sw/hw entry for a newly associated sta.
-
-Command
-
-when shared key ==> algorithm/keyid
-
-*/
-struct set_stakey_parm {
- u8 addr[ETH_ALEN];
- u8 algorithm;
- u8 id;/* currently for erasing cam entry if
- * algorithm == _NO_PRIVACY_ */
- u8 key[16];
-};
-
-struct set_stakey_rsp {
- u8 addr[ETH_ALEN];
- u8 keyid;
- u8 rsvd;
-};
-
-/*
-Caller Ad-Hoc/AP
-
-Command -Rsp(AID == CAMID) mode
-
-This is to force fw to add an sta_data entry per driver's request.
-
-FW will write a cam entry associated with it.
-
-*/
-struct set_assocsta_parm {
- u8 addr[ETH_ALEN];
-};
-
-struct set_assocsta_rsp {
- u8 cam_id;
- u8 rsvd[3];
-};
-
-/*
- Caller Ad-Hoc/AP
-
- Command mode
-
- This is to force fw to del an sta_data entry per driver's request
-
- FW will invalidate the cam entry associated with it.
-
-*/
-struct del_assocsta_parm {
- u8 addr[ETH_ALEN];
-};
-
-/*
-Caller Mode: AP/Ad-HoC(M)
-
-Notes: To notify fw that given staid has changed its power state
-
-Command Mode
-
-*/
-struct setstapwrstate_parm {
- u8 staid;
- u8 status;
- u8 hwaddr[6];
-};
-
-/*
-Caller Mode: Any
-
-Notes: To setup the basic rate of RTL8711
-
-Command Mode
-
-*/
-struct setbasicrate_parm {
- u8 basicrates[NumRates];
-};
-
-/*
-Caller Mode: Any
-
-Notes: To read the current basic rate
-
-Command-Rsp Mode
-
-*/
-struct getbasicrate_parm {
- u32 rsvd;
-};
-
-struct getbasicrate_rsp {
- u8 basicrates[NumRates];
-};
-
-/*
-Caller Mode: Any
-
-Notes: To setup the data rate of RTL8711
-
-Command Mode
-
-*/
-struct setdatarate_parm {
- u8 mac_id;
- u8 datarates[NumRates];
-};
-
-/*
-Caller Mode: Any
-
-Notes: To read the current data rate
-
-Command-Rsp Mode
-
-*/
-struct getdatarate_parm {
- u32 rsvd;
-
-};
-struct getdatarate_rsp {
- u8 datarates[NumRates];
-};
-
-/*
-Caller Mode: Any
-AP: AP can use the info for the contents of beacon frame
-Infra: STA can use the info when sitesurveying
-Ad-HoC(M): Like AP
-Ad-HoC(C): Like STA
-
-Notes: To set the phy capability of the NIC
-
-Command Mode
-
-*/
-
-struct setphyinfo_parm {
- struct regulatory_class class_sets[NUM_REGULATORYS];
- u8 status;
-};
-
-struct getphyinfo_parm {
- u32 rsvd;
-};
-
-struct getphyinfo_rsp {
- struct regulatory_class class_sets[NUM_REGULATORYS];
- u8 status;
-};
-
-/*
-Caller Mode: Any
-
-Notes: To set the channel/modem/band
-This command will be used when channel/modem/band is changed.
-
-Command Mode
-
-*/
-struct setphy_parm {
- u8 rfchannel;
- u8 modem;
-};
-
-/*
-Caller Mode: Any
-
-Notes: To get the current setting of channel/modem/band
-
-Command-Rsp Mode
-
-*/
-struct getphy_parm {
- u32 rsvd;
-
-};
-struct getphy_rsp {
- u8 rfchannel;
- u8 modem;
-};
-
-struct readBB_parm {
- u8 offset;
-};
-struct readBB_rsp {
- u8 value;
-};
-
-struct readTSSI_parm {
- u8 offset;
-};
-struct readTSSI_rsp {
- u8 value;
-};
-
-struct writeBB_parm {
- u8 offset;
- u8 value;
-};
-
-struct readRF_parm {
- u8 offset;
-};
-struct readRF_rsp {
- u32 value;
-};
-
-struct writeRF_parm {
- u32 offset;
- u32 value;
-};
-
-struct getrfintfs_parm {
- u8 rfintfs;
-};
-
-struct Tx_Beacon_param {
- struct wlan_bssid_ex network;
-};
-
-/*
- Notes: This command is used for H2C/C2H loopback testing
-
- mac[0] == 0
- ==> CMD mode, return H2C_SUCCESS.
- The following condition must be true under CMD mode
- mac[1] == mac[4], mac[2] == mac[3], mac[0]=mac[5]= 0;
- s0 == 0x1234, s1 == 0xabcd, w0 == 0x78563412, w1 == 0x5aa5def7;
- s2 == (b1 << 8 | b0);
-
- mac[0] == 1
- ==> CMD_RSP mode, return H2C_SUCCESS_RSP
-
- The rsp layout shall be:
- rsp: parm:
- mac[0] = mac[5];
- mac[1] = mac[4];
- mac[2] = mac[3];
- mac[3] = mac[2];
- mac[4] = mac[1];
- mac[5] = mac[0];
- s0 = s1;
- s1 = swap16(s0);
- w0 = swap32(w1);
- b0 = b1
- s2 = s0 + s1
- b1 = b0
- w1 = w0
-
- mac[0] == 2
- ==> CMD_EVENT mode, return H2C_SUCCESS
- The event layout shall be:
- event: parm:
- mac[0] = mac[5];
- mac[1] = mac[4];
- mac[2] = event's seq no, starting from 1 to parm's mac[3]
- mac[3] = mac[2];
- mac[4] = mac[1];
- mac[5] = mac[0];
- s0 = swap16(s0) - event.mac[2];
- s1 = s1 + event.mac[2];
- w0 = swap32(w0);
- b0 = b1
- s2 = s0 + event.mac[2]
- b1 = b0
- w1 = swap32(w1) - event.mac[2];
-
- parm->mac[3] is the total event count that the host requested.
- The event will be the same as the cmd's param.
-*/
-
-/* CMD param Format for driver extra cmd handler */
-struct drvextra_cmd_parm {
- int ec_id; /* extra cmd id */
- int type_size; /* Can use this field as the type id or command size */
- unsigned char *pbuf;
-};
-
-/*------------------- Below are used for RF/BB tuning ---------------------*/
-
-struct setantenna_parm {
- u8 tx_antset;
- u8 rx_antset;
- u8 tx_antenna;
- u8 rx_antenna;
-};
-
-struct enrateadaptive_parm {
- u32 en;
-};
-
-struct settxagctbl_parm {
- u32 txagc[MAX_RATES_LENGTH];
-};
-
-struct gettxagctbl_parm {
- u32 rsvd;
-};
-struct gettxagctbl_rsp {
- u32 txagc[MAX_RATES_LENGTH];
-};
-
-struct setagcctrl_parm {
- u32 agcctrl; /* 0: pure hw, 1: fw */
-};
-
-struct setssup_parm {
- u32 ss_ForceUp[MAX_RATES_LENGTH];
-};
-
-struct getssup_parm {
- u32 rsvd;
-};
-
-struct getssup_rsp {
- u8 ss_ForceUp[MAX_RATES_LENGTH];
-};
-
-struct setssdlevel_parm {
- u8 ss_DLevel[MAX_RATES_LENGTH];
-};
-
-struct getssdlevel_parm {
- u32 rsvd;
-};
-
-struct getssdlevel_rsp {
- u8 ss_DLevel[MAX_RATES_LENGTH];
-};
-
-struct setssulevel_parm {
- u8 ss_ULevel[MAX_RATES_LENGTH];
-};
-
-struct getssulevel_parm {
- u32 rsvd;
-};
-
-struct getssulevel_rsp {
- u8 ss_ULevel[MAX_RATES_LENGTH];
-};
-
-struct setcountjudge_parm {
- u8 count_judge[MAX_RATES_LENGTH];
-};
-
-struct getcountjudge_parm {
- u32 rsvd;
-};
-
-struct getcountjudge_rsp {
- u8 count_judge[MAX_RATES_LENGTH];
-};
-
-struct setratable_parm {
- u8 ss_ForceUp[NumRates];
- u8 ss_ULevel[NumRates];
- u8 ss_DLevel[NumRates];
- u8 count_judge[NumRates];
-};
-
-struct getratable_parm {
- uint rsvd;
-};
-
-struct getratable_rsp {
- u8 ss_ForceUp[NumRates];
- u8 ss_ULevel[NumRates];
- u8 ss_DLevel[NumRates];
- u8 count_judge[NumRates];
-};
-
-/* to get TX,RX retry count */
-
-struct gettxretrycnt_parm {
- unsigned int rsvd;
-};
-
-struct gettxretrycnt_rsp {
- unsigned long tx_retrycnt;
-};
-
-struct getrxretrycnt_parm {
- unsigned int rsvd;
-};
-
-struct getrxretrycnt_rsp {
- unsigned long rx_retrycnt;
-};
-
-/* to get BCNOK,BCNERR count */
-struct getbcnokcnt_parm {
- unsigned int rsvd;
-};
-
-struct getbcnokcnt_rsp {
- unsigned long bcnokcnt;
-};
-
-struct getbcnerrcnt_parm {
- unsigned int rsvd;
-};
-
-struct getbcnerrcnt_rsp {
- unsigned long bcnerrcnt;
-};
-
-/* to get current TX power level */
-struct getcurtxpwrlevel_parm {
- unsigned int rsvd;
-};
-struct getcurtxpwrlevel_rspi {
- unsigned short tx_power;
-};
-
-struct setprobereqextraie_parm {
- unsigned char e_id;
- unsigned char ie_len;
- unsigned char ie[];
-};
-
-struct setassocreqextraie_parm {
- unsigned char e_id;
- unsigned char ie_len;
- unsigned char ie[];
-};
-
-struct setproberspextraie_parm {
- unsigned char e_id;
- unsigned char ie_len;
- unsigned char ie[];
-};
-
-struct setassocrspextraie_parm {
- unsigned char e_id;
- unsigned char ie_len;
- unsigned char ie[];
-};
-
-struct addBaReq_parm {
- unsigned int tid;
- u8 addr[ETH_ALEN];
-};
-
-/*H2C Handler index: 46 */
-struct set_ch_parm {
- u8 ch;
- u8 bw;
- u8 ch_offset;
-};
-
-/*H2C Handler index: 59 */
-struct SetChannelPlan_param {
- u8 channel_plan;
-};
-
-/*H2C Handler index: 60 */
-struct LedBlink_param {
- struct LED_871x *pLed;
-};
-
-/*H2C Handler index: 61 */
-struct SetChannelSwitch_param {
- u8 new_ch_no;
-};
-
-/*H2C Handler index: 62 */
-struct TDLSoption_param {
- u8 addr[ETH_ALEN];
- u8 option;
-};
-
-#define GEN_CMD_CODE(cmd) cmd ## _CMD_
-
-/*
-
-Result:
-0x00: success
-0x01: success, and check Response.
-0x02: cmd ignored due to duplicated sequence number
-0x03: cmd dropped due to invalid cmd code
-0x04: reserved.
-
-*/
-
-#define H2C_RSP_OFFSET 512
-
-#define H2C_SUCCESS 0x00
-#define H2C_SUCCESS_RSP 0x01
-#define H2C_DUPLICATED 0x02
-#define H2C_DROPPED 0x03
-#define H2C_PARAMETERS_ERROR 0x04
-#define H2C_REJECTED 0x05
-#define H2C_CMD_OVERFLOW 0x06
-#define H2C_RESERVED 0x07
-
-u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, int ssid_num);
-u8 rtw_createbss_cmd(struct adapter *padapter);
-u8 rtw_setstakey_cmd(struct adapter *padapter, u8 *psta, u8 unicast_key);
-u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue);
-u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork);
-u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueue);
-u8 rtw_setopmode_cmd(struct adapter *padapter, enum ndis_802_11_network_infra networktype);
-int rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset);
-u8 rtw_setrfintfs_cmd(struct adapter *padapter, u8 mode);
-
-u8 rtw_gettssi_cmd(struct adapter *padapter, u8 offset, u8 *pval);
-u8 rtw_setfwdig_cmd(struct adapter *padapter, u8 type);
-u8 rtw_setfwra_cmd(struct adapter *padapter, u8 type);
-
-u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr);
-
-u8 rtw_dynamic_chk_wk_cmd(struct adapter *adapter);
-
-u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue);
-u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 minRptTime);
-
-u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue);
-u8 rtw_ps_cmd(struct adapter *padapter);
-
-void rtw_chk_hi_queue_cmd(struct adapter *padapter);
-
-u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan);
-
-u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt);
-
-u8 rtw_drvextra_cmd_hdl(struct adapter *padapter, unsigned char *pbuf);
-
-void rtw_survey_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd);
-void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd);
-void rtw_joinbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd);
-void rtw_createbss_cmd_callback(struct adapter *adapt, struct cmd_obj *pcmd);
-void rtw_getbbrfreg_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
-
-void rtw_setstaKey_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
-void rtw_setassocsta_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cm);
-void rtw_getrttbl_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
-
-struct _cmd_callback {
- u32 cmd_code;
- void (*callback)(struct adapter *padapter, struct cmd_obj *cmd);
-};
-
-enum rtw_h2c_cmd {
- GEN_CMD_CODE(_Read_MACREG), /*0*/
- GEN_CMD_CODE(_Write_MACREG),
- GEN_CMD_CODE(_Read_BBREG),
- GEN_CMD_CODE(_Write_BBREG),
- GEN_CMD_CODE(_Read_RFREG),
- GEN_CMD_CODE(_Write_RFREG), /*5*/
- GEN_CMD_CODE(_Read_EEPROM),
- GEN_CMD_CODE(_Write_EEPROM),
- GEN_CMD_CODE(_Read_EFUSE),
- GEN_CMD_CODE(_Write_EFUSE),
-
- GEN_CMD_CODE(_Read_CAM), /*10*/
- GEN_CMD_CODE(_Write_CAM),
- GEN_CMD_CODE(_setBCNITV),
- GEN_CMD_CODE(_setMBIDCFG),
- GEN_CMD_CODE(_JoinBss), /*14*/
- GEN_CMD_CODE(_DisConnect), /*15*/
- GEN_CMD_CODE(_CreateBss),
- GEN_CMD_CODE(_SetOpMode),
- GEN_CMD_CODE(_SiteSurvey), /*18*/
- GEN_CMD_CODE(_SetAuth),
-
- GEN_CMD_CODE(_SetKey), /*20*/
- GEN_CMD_CODE(_SetStaKey),
- GEN_CMD_CODE(_SetAssocSta),
- GEN_CMD_CODE(_DelAssocSta),
- GEN_CMD_CODE(_SetStaPwrState),
- GEN_CMD_CODE(_SetBasicRate), /*25*/
- GEN_CMD_CODE(_GetBasicRate),
- GEN_CMD_CODE(_SetDataRate),
- GEN_CMD_CODE(_GetDataRate),
- GEN_CMD_CODE(_SetPhyInfo),
-
- GEN_CMD_CODE(_GetPhyInfo), /*30*/
- GEN_CMD_CODE(_SetPhy),
- GEN_CMD_CODE(_GetPhy),
- GEN_CMD_CODE(_readRssi),
- GEN_CMD_CODE(_readGain),
- GEN_CMD_CODE(_SetAtim), /*35*/
- GEN_CMD_CODE(_SetPwrMode),
- GEN_CMD_CODE(_JoinbssRpt),
- GEN_CMD_CODE(_SetRaTable),
- GEN_CMD_CODE(_GetRaTable),
-
- GEN_CMD_CODE(_GetCCXReport), /*40*/
- GEN_CMD_CODE(_GetDTMReport),
- GEN_CMD_CODE(_GetTXRateStatistics),
- GEN_CMD_CODE(_SetUsbSuspend),
- GEN_CMD_CODE(_SetH2cLbk),
- GEN_CMD_CODE(_AddBAReq), /*45*/
- GEN_CMD_CODE(_SetChannel), /*46*/
- GEN_CMD_CODE(_SetTxPower),
- GEN_CMD_CODE(_SwitchAntenna),
- GEN_CMD_CODE(_SetCrystalCap),
- GEN_CMD_CODE(_SetSingleCarrierTx), /*50*/
-
- GEN_CMD_CODE(_SetSingleToneTx),/*51*/
- GEN_CMD_CODE(_SetCarrierSuppressionTx),
- GEN_CMD_CODE(_SetContinuousTx),
- GEN_CMD_CODE(_SwitchBandwidth), /*54*/
- GEN_CMD_CODE(_TX_Beacon), /*55*/
-
- GEN_CMD_CODE(_Set_MLME_EVT), /*56*/
- GEN_CMD_CODE(_Set_Drv_Extra), /*57*/
- GEN_CMD_CODE(_Set_H2C_MSG), /*58*/
-
- GEN_CMD_CODE(_SetChannelPlan), /*59*/
- GEN_CMD_CODE(_LedBlink), /*60*/
-
- GEN_CMD_CODE(_SetChannelSwitch), /*61*/
- GEN_CMD_CODE(_TDLS), /*62*/
-
- MAX_H2CCMD
-};
-
-#define _GetBBReg_CMD_ _Read_BBREG_CMD_
-#define _SetBBReg_CMD_ _Write_BBREG_CMD_
-#define _GetRFReg_CMD_ _Read_RFREG_CMD_
-#define _SetRFReg_CMD_ _Write_RFREG_CMD_
-
-#ifdef _RTW_CMD_C_
-static struct _cmd_callback rtw_cmd_callback[] = {
- {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
- {GEN_CMD_CODE(_Write_MACREG), NULL},
- {GEN_CMD_CODE(_Read_BBREG), &rtw_getbbrfreg_cmdrsp_callback},
- {GEN_CMD_CODE(_Write_BBREG), NULL},
- {GEN_CMD_CODE(_Read_RFREG), &rtw_getbbrfreg_cmdrsp_callback},
- {GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/
- {GEN_CMD_CODE(_Read_EEPROM), NULL},
- {GEN_CMD_CODE(_Write_EEPROM), NULL},
- {GEN_CMD_CODE(_Read_EFUSE), NULL},
- {GEN_CMD_CODE(_Write_EFUSE), NULL},
-
- {GEN_CMD_CODE(_Read_CAM), NULL}, /*10*/
- {GEN_CMD_CODE(_Write_CAM), NULL},
- {GEN_CMD_CODE(_setBCNITV), NULL},
- {GEN_CMD_CODE(_setMBIDCFG), NULL},
- {GEN_CMD_CODE(_JoinBss), &rtw_joinbss_cmd_callback}, /*14*/
- {GEN_CMD_CODE(_DisConnect), &rtw_disassoc_cmd_callback}, /*15*/
- {GEN_CMD_CODE(_CreateBss), &rtw_createbss_cmd_callback},
- {GEN_CMD_CODE(_SetOpMode), NULL},
- {GEN_CMD_CODE(_SiteSurvey), &rtw_survey_cmd_callback}, /*18*/
- {GEN_CMD_CODE(_SetAuth), NULL},
-
- {GEN_CMD_CODE(_SetKey), NULL}, /*20*/
- {GEN_CMD_CODE(_SetStaKey), &rtw_setstaKey_cmdrsp_callback},
- {GEN_CMD_CODE(_SetAssocSta), &rtw_setassocsta_cmdrsp_callback},
- {GEN_CMD_CODE(_DelAssocSta), NULL},
- {GEN_CMD_CODE(_SetStaPwrState), NULL},
- {GEN_CMD_CODE(_SetBasicRate), NULL}, /*25*/
- {GEN_CMD_CODE(_GetBasicRate), NULL},
- {GEN_CMD_CODE(_SetDataRate), NULL},
- {GEN_CMD_CODE(_GetDataRate), NULL},
- {GEN_CMD_CODE(_SetPhyInfo), NULL},
-
- {GEN_CMD_CODE(_GetPhyInfo), NULL}, /*30*/
- {GEN_CMD_CODE(_SetPhy), NULL},
- {GEN_CMD_CODE(_GetPhy), NULL},
- {GEN_CMD_CODE(_readRssi), NULL},
- {GEN_CMD_CODE(_readGain), NULL},
- {GEN_CMD_CODE(_SetAtim), NULL}, /*35*/
- {GEN_CMD_CODE(_SetPwrMode), NULL},
- {GEN_CMD_CODE(_JoinbssRpt), NULL},
- {GEN_CMD_CODE(_SetRaTable), NULL},
- {GEN_CMD_CODE(_GetRaTable), NULL},
-
- {GEN_CMD_CODE(_GetCCXReport), NULL}, /*40*/
- {GEN_CMD_CODE(_GetDTMReport), NULL},
- {GEN_CMD_CODE(_GetTXRateStatistics), NULL},
- {GEN_CMD_CODE(_SetUsbSuspend), NULL},
- {GEN_CMD_CODE(_SetH2cLbk), NULL},
- {GEN_CMD_CODE(_AddBAReq), NULL}, /*45*/
- {GEN_CMD_CODE(_SetChannel), NULL}, /*46*/
- {GEN_CMD_CODE(_SetTxPower), NULL},
- {GEN_CMD_CODE(_SwitchAntenna), NULL},
- {GEN_CMD_CODE(_SetCrystalCap), NULL},
- {GEN_CMD_CODE(_SetSingleCarrierTx), NULL}, /*50*/
-
- {GEN_CMD_CODE(_SetSingleToneTx), NULL}, /*51*/
- {GEN_CMD_CODE(_SetCarrierSuppressionTx), NULL},
- {GEN_CMD_CODE(_SetContinuousTx), NULL},
- {GEN_CMD_CODE(_SwitchBandwidth), NULL}, /*54*/
- {GEN_CMD_CODE(_TX_Beacon), NULL},/*55*/
-
- {GEN_CMD_CODE(_Set_MLME_EVT), NULL},/*56*/
- {GEN_CMD_CODE(_Set_Drv_Extra), NULL},/*57*/
- {GEN_CMD_CODE(_Set_H2C_MSG), NULL},/*58*/
- {GEN_CMD_CODE(_SetChannelPlan), NULL},/*59*/
- {GEN_CMD_CODE(_LedBlink), NULL},/*60*/
-
- {GEN_CMD_CODE(_SetChannelSwitch), NULL},/*61*/
- {GEN_CMD_CODE(_TDLS), NULL},/*62*/
-};
-#endif
-
-#endif /* _CMD_H_ */
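For orientation, a sketch, not from the original header, of how a command thread might use the rtw_cmd_callback[] table above once a command has completed; the table itself is only compiled into the translation unit that defines _RTW_CMD_C_.

static void sketch_cmd_complete(struct adapter *padapter, struct cmd_obj *pcmd)
{
	/* dispatch the per-command completion callback, or just free the object */
	if (pcmd->cmdcode < MAX_H2CCMD && rtw_cmd_callback[pcmd->cmdcode].callback)
		rtw_cmd_callback[pcmd->cmdcode].callback(padapter, pcmd);
	else
		rtw_free_cmd_obj(pcmd);
}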
diff --git a/drivers/staging/r8188eu/include/rtw_eeprom.h b/drivers/staging/r8188eu/include/rtw_eeprom.h
deleted file mode 100644
index 94d735b1d0db..000000000000
--- a/drivers/staging/r8188eu/include/rtw_eeprom.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTW_EEPROM_H__
-#define __RTW_EEPROM_H__
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-struct eeprom_priv {
- u8 bautoload_fail_flag;
- u8 mac_addr[ETH_ALEN] __aligned(2); /* PermanentAddress */
-};
-
-#endif /* __RTL871X_EEPROM_H__ */
diff --git a/drivers/staging/r8188eu/include/rtw_efuse.h b/drivers/staging/r8188eu/include/rtw_efuse.h
deleted file mode 100644
index 3d688a0e6dfb..000000000000
--- a/drivers/staging/r8188eu/include/rtw_efuse.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTW_EFUSE_H__
-#define __RTW_EFUSE_H__
-
-#define EFUSE_MAX_WORD_UNIT 4
-
-void ReadEFuseByte(struct adapter *adapter, u16 _offset, u8 *pbuf);
-
-#endif
diff --git a/drivers/staging/r8188eu/include/rtw_event.h b/drivers/staging/r8188eu/include/rtw_event.h
deleted file mode 100644
index 54dc1ea437fc..000000000000
--- a/drivers/staging/r8188eu/include/rtw_event.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef _RTW_EVENT_H_
-#define _RTW_EVENT_H_
-
-#include "osdep_service.h"
-
-#include "wlan_bssdef.h"
-#include <linux/semaphore.h>
-#include <linux/sem.h>
-
-/*
-Used to report a bss has been scanned
-*/
-struct survey_event {
- struct wlan_bssid_ex bss;
-};
-
-/*
-Used to report that the requested site survey has been done.
-
-bss_cnt indicates the number of bss that have been reported.
-
-*/
-struct surveydone_event {
- unsigned int bss_cnt;
-
-};
-
-/*
-Used to report the link result of joining the given bss
-
-join_res:
--1: authentication fail
--2: association fail
-> 0: TID
-
-*/
-struct joinbss_event {
- struct wlan_network network;
-};
-
-/*
-Used to report that a given STA has joined the created BSS.
-It is used in AP/Ad-HoC(M) mode.
-*/
-
-struct stassoc_event {
- unsigned char macaddr[6];
- unsigned char rsvd[2];
- int cam_id;
-};
-
-struct stadel_event {
- unsigned char macaddr[6];
- unsigned char rsvd[2]; /* for reason */
- int mac_id;
-};
-
-struct addba_event {
- unsigned int tid;
-};
-
-#define GEN_EVT_CODE(event) event ## _EVT_
-
-struct fwevent {
- u32 parmsize;
- void (*event_callback)(struct adapter *dev, u8 *pbuf);
-};
-
-#define C2HEVENT_SZ 32
-
-struct event_node {
- unsigned char *node;
- unsigned char evt_code;
- unsigned short evt_sz;
- int *caller_ff_tail;
- int caller_ff_sz;
-};
-
-struct c2hevent_queue {
- int head;
- int tail;
- struct event_node nodes[C2HEVENT_SZ];
- unsigned char seq;
-};
-
-#define NETWORK_QUEUE_SZ 4
-
-struct network_queue {
- int head;
- int tail;
- struct wlan_bssid_ex networks[NETWORK_QUEUE_SZ];
-};
-
-#endif /* _WLANEVENT_H_ */
diff --git a/drivers/staging/r8188eu/include/rtw_fw.h b/drivers/staging/r8188eu/include/rtw_fw.h
deleted file mode 100644
index 8f74157ee9ac..000000000000
--- a/drivers/staging/r8188eu/include/rtw_fw.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTW_FW_H__
-#define __RTW_FW_H__
-
-struct rt_firmware {
- u8 *data;
- u32 size;
-};
-
-#include "drv_types.h"
-
-int rtl8188e_firmware_download(struct adapter *padapter);
-void rtw_reset_8051(struct adapter *padapter);
-
-#endif
diff --git a/drivers/staging/r8188eu/include/rtw_ht.h b/drivers/staging/r8188eu/include/rtw_ht.h
deleted file mode 100644
index 2b56b7c38c86..000000000000
--- a/drivers/staging/r8188eu/include/rtw_ht.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef _RTW_HT_H_
-#define _RTW_HT_H_
-
-#include "osdep_service.h"
-#include "wifi.h"
-
-struct ht_priv {
- u32 ht_option;
- u32 ampdu_enable;/* for enable Tx A-MPDU */
- u32 tx_amsdu_enable;/* for enable Tx A-MSDU */
- u32 tx_amdsu_maxlen; /* 1: 8k, 0:4k ; default:8k, for tx */
- u32 rx_ampdu_maxlen; /* for rx reordering ctrl win_sz,
- * updated when join_callback. */
- u8 bwmode;/* */
- u8 ch_offset;/* PRIME_CHNL_OFFSET */
- u8 sgi;/* short GI */
-
- /* for processing Tx A-MPDU */
- u8 agg_enable_bitmap;
- u8 candidate_tid_bitmap;
-
- struct ieee80211_ht_cap ht_cap;
-};
-
-#endif /* _RTL871X_HT_H_ */
diff --git a/drivers/staging/r8188eu/include/rtw_io.h b/drivers/staging/r8188eu/include/rtw_io.h
deleted file mode 100644
index e1718f739cc9..000000000000
--- a/drivers/staging/r8188eu/include/rtw_io.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef _RTW_IO_H_
-#define _RTW_IO_H_
-
-#include "osdep_service.h"
-#include "osdep_intf.h"
-
-#include <asm/byteorder.h>
-#include <linux/semaphore.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-
-#include <linux/usb.h>
-#include <linux/usb/ch9.h>
-
-int __must_check rtw_read8(struct adapter *adapter, u32 addr, u8 *data);
-int __must_check rtw_read16(struct adapter *adapter, u32 addr, u16 *data);
-int __must_check rtw_read32(struct adapter *adapter, u32 addr, u32 *data);
-int rtw_read_port(struct adapter *adapter, struct recv_buf *precvbuf);
-void rtw_read_port_cancel(struct adapter *adapter);
-
-int rtw_write8(struct adapter *adapter, u32 addr, u8 val);
-int rtw_write16(struct adapter *adapter, u32 addr, u16 val);
-int rtw_write32(struct adapter *adapter, u32 addr, u32 val);
-int rtw_writeN(struct adapter *adapter, u32 addr, u32 length, u8 *pdata);
-
-u32 rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-void rtw_write_port_cancel(struct adapter *adapter);
-
-#endif /* _RTL8711_IO_H_ */
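Since the readers above are __must_check and return the value through an out-parameter, callers follow an error-first pattern. A small illustrative sketch; the helper name is made up and the register offset is whatever the caller passes in.

static int sketch_toggle_bit0(struct adapter *adapter, u32 reg)
{
	u8 val;
	int res;

	res = rtw_read8(adapter, reg, &val);
	if (res)
		return res;

	return rtw_write8(adapter, reg, val | BIT(0));
}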
diff --git a/drivers/staging/r8188eu/include/rtw_ioctl.h b/drivers/staging/r8188eu/include/rtw_ioctl.h
deleted file mode 100644
index c704f3040ac8..000000000000
--- a/drivers/staging/r8188eu/include/rtw_ioctl.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef _RTW_IOCTL_H_
-#define _RTW_IOCTL_H_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-extern struct iw_handler_def rtw_handlers_def;
-extern int ui_pid[3];
-
-#endif /* #ifndef __INC_CEINFO_ */
diff --git a/drivers/staging/r8188eu/include/rtw_ioctl_set.h b/drivers/staging/r8188eu/include/rtw_ioctl_set.h
deleted file mode 100644
index c3eb2479f27b..000000000000
--- a/drivers/staging/r8188eu/include/rtw_ioctl_set.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTW_IOCTL_SET_H_
-#define __RTW_IOCTL_SET_H_
-
-#include "drv_types.h"
-
-typedef u8 NDIS_802_11_PMKID_VALUE[16];
-
-u8 rtw_set_802_11_authentication_mode(struct adapter *adapt,
- enum ndis_802_11_auth_mode authmode);
-u8 rtw_set_802_11_bssid(struct adapter *adapter, u8 *bssid);
-u8 rtw_set_802_11_add_wep(struct adapter *adapter, struct ndis_802_11_wep *wep);
-void rtw_set_802_11_disassociate(struct adapter *adapter);
-u8 rtw_set_802_11_bssid_list_scan(struct adapter *adapter,
- struct ndis_802_11_ssid *pssid,
- int ssid_max_num);
-u8 rtw_set_802_11_infrastructure_mode(struct adapter *adapter,
- enum ndis_802_11_network_infra type);
-u8 rtw_set_802_11_ssid(struct adapter *adapt, struct ndis_802_11_ssid *ssid);
-u16 rtw_get_cur_max_rate(struct adapter *adapter);
-int rtw_change_ifname(struct adapter *padapter, const char *ifname);
-
-#endif
diff --git a/drivers/staging/r8188eu/include/rtw_iol.h b/drivers/staging/r8188eu/include/rtw_iol.h
deleted file mode 100644
index 099f5a075274..000000000000
--- a/drivers/staging/r8188eu/include/rtw_iol.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTW_IOL_H_
-#define __RTW_IOL_H_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-#define IOREG_CMD_END_LEN 4
-
-struct ioreg_cfg {
- u8 length;
- u8 cmd_id;
- __le16 address;
- __le32 data;
- __le32 mask;
-};
-
-enum ioreg_cmd {
- IOREG_CMD_LLT = 0x01,
- IOREG_CMD_REFUSE = 0x02,
- IOREG_CMD_EFUSE_PATH = 0x03,
- IOREG_CMD_WB_REG = 0x04,
- IOREG_CMD_WW_REG = 0x05,
- IOREG_CMD_WD_REG = 0x06,
- IOREG_CMD_W_RF = 0x07,
- IOREG_CMD_DELAY_US = 0x10,
- IOREG_CMD_DELAY_MS = 0x11,
- IOREG_CMD_END = 0xFF,
-};
-
-struct xmit_frame *rtw_IOL_accquire_xmit_frame(struct adapter *adapter);
-int rtw_IOL_append_cmds(struct xmit_frame *xmit_frame, u8 *IOL_cmds,
- u32 cmd_len);
-bool rtw_IOL_applied(struct adapter *adapter);
-int rtw_IOL_append_DELAY_US_cmd(struct xmit_frame *xmit_frame, u16 us);
-int rtw_IOL_append_DELAY_MS_cmd(struct xmit_frame *xmit_frame, u16 ms);
-int rtw_IOL_append_END_cmd(struct xmit_frame *xmit_frame);
-
-void read_efuse_from_txpktbuf(struct adapter *adapter, int bcnhead,
- u8 *content, u16 *size);
-
-int rtw_IOL_append_WB_cmd(struct xmit_frame *xmit_frame, u16 addr,
- u8 value, u8 mask);
-int rtw_IOL_append_WW_cmd(struct xmit_frame *xmit_frame, u16 addr,
- u16 value, u16 mask);
-int rtw_IOL_append_WD_cmd(struct xmit_frame *xmit_frame, u16 addr,
- u32 value, u32 mask);
-int rtw_IOL_append_WRF_cmd(struct xmit_frame *xmit_frame, u8 rf_path,
- u16 addr, u32 value, u32 mask);
-
-u8 rtw_IOL_cmd_boundary_handle(struct xmit_frame *pxmit_frame);
-
-#endif /* __RTW_IOL_H_ */
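A sketch, not part of the original header, of how the append helpers might be chained to build a small I/O-offload sequence: write a byte, wait, then terminate the list. The register offset is a placeholder, and the helpers' success/error return convention is not shown in this header, so checks are omitted here.

static void sketch_build_iol_sequence(struct adapter *adapter)
{
	struct xmit_frame *frame;

	if (!rtw_IOL_applied(adapter))
		return;

	frame = rtw_IOL_accquire_xmit_frame(adapter);
	if (!frame)
		return;

	rtw_IOL_append_WB_cmd(frame, 0x0522, 0xff, 0xff);	/* placeholder offset */
	rtw_IOL_append_DELAY_MS_cmd(frame, 1);
	rtw_IOL_append_END_cmd(frame);
}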
diff --git a/drivers/staging/r8188eu/include/rtw_led.h b/drivers/staging/r8188eu/include/rtw_led.h
deleted file mode 100644
index ea5f5edd9013..000000000000
--- a/drivers/staging/r8188eu/include/rtw_led.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTW_LED_H_
-#define __RTW_LED_H_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-enum LED_CTL_MODE {
- LED_CTL_LINK = 2,
- LED_CTL_NO_LINK = 3,
- LED_CTL_TX = 4,
- LED_CTL_RX = 5,
- LED_CTL_SITE_SURVEY = 6,
- LED_CTL_POWER_OFF = 7,
- LED_CTL_START_TO_LINK = 8,
- LED_CTL_START_WPS = 9,
- LED_CTL_STOP_WPS = 10,
- LED_CTL_STOP_WPS_FAIL = 12,
-};
-
-enum LED_STATE_871x {
- RTW_LED_OFF = 2,
- LED_BLINK_NORMAL = 3,
- LED_BLINK_SLOWLY = 4,
- LED_BLINK_SCAN = 6, /* LED is blinking during scanning period,
- * the # of times to blink depends on the
- * time spent scanning. */
- LED_BLINK_TXRX = 9,
-	LED_BLINK_WPS = 10, /* LED is blinking during WPS communication */
- LED_BLINK_WPS_STOP = 11,
-};
-
-struct led_priv {
- bool bRegUseLed;
-
- enum LED_STATE_871x CurrLedState; /* Current LED state. */
-
- bool bLedOn; /* true if LED is ON, false if LED is OFF. */
-
-	bool bLedBlinkInProgress; /* true if it is blinking, false otherwise. */
-
- bool bLedWPSBlinkInProgress;
-
- u32 BlinkTimes; /* Number of times to toggle led state for blinking. */
-
- bool bLedScanBlinkInProgress;
- struct delayed_work blink_work;
-};
-
-void rtl8188eu_InitSwLeds(struct adapter *padapter);
-void rtl8188eu_DeInitSwLeds(struct adapter *padapter);
-
-void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction);
-
-#endif /* __RTW_LED_H_ */
diff --git a/drivers/staging/r8188eu/include/rtw_mlme.h b/drivers/staging/r8188eu/include/rtw_mlme.h
deleted file mode 100644
index 3ff653ff1d81..000000000000
--- a/drivers/staging/r8188eu/include/rtw_mlme.h
+++ /dev/null
@@ -1,574 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTW_MLME_H_
-#define __RTW_MLME_H_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-#include "wlan_bssdef.h"
-
-#define MAX_BSS_CNT 128
-#define MAX_JOIN_TIMEOUT 6500
-
-/* Increase the scanning timeout because of increasing the SURVEY_TO value. */
-
-#define SCANNING_TIMEOUT 8000
-
-#define SCAN_INTERVAL (30) /* unit:2sec, 30*2=60sec */
-
-#define SCANQUEUE_LIFETIME 20 /* unit:sec */
-
-#define WIFI_NULL_STATE 0x00000000
-
-#define WIFI_ASOC_STATE 0x00000001 /* Under Linked state */
-#define WIFI_REASOC_STATE 0x00000002
-#define WIFI_SLEEP_STATE 0x00000004
-#define WIFI_STATION_STATE 0x00000008
-
-#define WIFI_AP_STATE 0x00000010
-#define WIFI_ADHOC_STATE 0x00000020
-#define WIFI_ADHOC_MASTER_STATE 0x00000040
-#define WIFI_UNDER_LINKING 0x00000080
-
-#define WIFI_UNDER_WPS 0x00000100
-#define WIFI_STA_ALIVE_CHK_STATE 0x00000400
-#define WIFI_SITE_MONITOR 0x00000800 /* to indicate the station is under site surveying */
-
-#define WIFI_MP_STATE 0x00010000
-#define WIFI_MP_CTX_BACKGROUND 0x00020000 /* in continuous tx background */
-#define WIFI_MP_CTX_ST 0x00040000 /* in continuous tx with single-tone */
-#define WIFI_MP_CTX_BACKGROUND_PENDING 0x00080000 /* pending in continuous tx background due to out of skb */
-#define WIFI_MP_CTX_CCK_HW 0x00100000 /* in continuous tx */
-#define WIFI_MP_CTX_CCK_CS 0x00200000 /* in continuous tx with carrier suppression */
-#define WIFI_MP_LPBK_STATE 0x00400000
-
-#define _FW_UNDER_LINKING WIFI_UNDER_LINKING
-#define _FW_LINKED WIFI_ASOC_STATE
-#define _FW_UNDER_SURVEY WIFI_SITE_MONITOR
-
-enum dot11AuthAlgrthmNum {
- dot11AuthAlgrthm_Open = 0,
- dot11AuthAlgrthm_Shared,
- dot11AuthAlgrthm_8021X,
- dot11AuthAlgrthm_Auto,
- dot11AuthAlgrthm_WAPI,
- dot11AuthAlgrthm_MaxNum
-};
-
-/* Scan type including active and passive scan. */
-enum rt_scan_type {
- SCAN_PASSIVE,
- SCAN_ACTIVE,
- SCAN_MIX,
-};
-
-/*
-there are several "locks" in mlme_priv,
-since mlme_priv is a shared resource between many threads,
-like ISR/Call-Back functions, the OID handlers, and even timer functions.
-
-Each _queue has its own locks, already.
-Other items are protected by mlme_priv.lock.
-
-To avoid a possible deadlock, any thread trying to modify mlme_priv
-SHALL NOT hold more than one lock at a time!
-*/
-
-#define traffic_threshold 10
-#define traffic_scan_period 500
-
-struct sitesurvey_ctrl {
- u64 last_tx_pkts;
- uint last_rx_pkts;
- int traffic_busy;
- struct timer_list sitesurvey_ctrl_timer;
-};
-
-struct rt_link_detect {
- u32 NumTxOkInPeriod;
- u32 NumRxOkInPeriod;
- u32 NumRxUnicastOkInPeriod;
- bool bBusyTraffic;
- bool bTxBusyTraffic;
- bool bRxBusyTraffic;
- bool bHigherBusyTraffic; /* For interrupt migration purpose. */
- bool bHigherBusyRxTraffic; /* We may disable Tx interrupt according
- * to Rx traffic. */
- bool bHigherBusyTxTraffic; /* We may disable Tx interrupt according
- * to Tx traffic. */
-};
-
-struct profile_info {
- u8 ssidlen;
- u8 ssid[WLAN_SSID_MAXLEN];
- u8 peermac[ETH_ALEN];
-};
-
-struct tx_invite_req_info {
- u8 token;
- u8 benable;
- u8 go_ssid[WLAN_SSID_MAXLEN];
- u8 ssidlen;
- u8 go_bssid[ETH_ALEN];
- u8 peer_macaddr[ETH_ALEN];
- u8 operating_ch; /* This information will be set by using the
- * p2p_set op_ch=x */
- u8 peer_ch; /* The listen channel for peer P2P device */
-};
-
-struct tx_invite_resp_info {
- u8 token; /* Used to record the dialog token of p2p invitation
- * request frame. */
-};
-
-struct tx_provdisc_req_info {
- u16 wps_config_method_request; /* Used when sending the
- * provisioning request frame*/
-	u16 peer_channel_num[2];	/* The channel number on which the
-					 * receiver operates. */
- struct ndis_802_11_ssid ssid;
- u8 peerDevAddr[ETH_ALEN]; /* Peer device address */
- u8 peerIFAddr[ETH_ALEN]; /* Peer interface address */
- u8 benable; /* This provision discovery
- * request frame is trigger
- * to send or not */
-};
-
-/* When peer device issue prov_disc_req first, we should store the following
- * information */
-/* The UI must know this information to know which config method the
- * remote p2p device needs. */
-struct rx_provdisc_req_info {
- u8 peerDevAddr[ETH_ALEN]; /* Peer device address */
- u8 strconfig_method_desc_of_prov_disc_req[4]; /* description
- * for the config method located in the provisioning
- * discovery request frame. */
-};
-
-struct tx_nego_req_info {
- u16 peer_channel_num[2]; /* The channel number. */
- u8 peerDevAddr[ETH_ALEN]; /* Peer device address */
- u8 benable; /* This negotiation request frame is
- * trigger to send or not */
-};
-
-struct group_id_info {
- u8 go_device_addr[ETH_ALEN]; /* The GO's device address of
- * this P2P group */
- u8 ssid[WLAN_SSID_MAXLEN]; /* The SSID of this P2P group */
-};
-
-struct scan_limit_info {
- u8 scan_op_ch_only; /* When this flag is set, the driver
- * should only scan the op. channel */
- u8 operation_ch[2]; /* Store the op. chan of invitation */
-};
-
-struct wifidirect_info {
- struct adapter *padapter;
- struct timer_list find_phase_timer;
- struct timer_list restore_p2p_state_timer;
-
-	/* Used to do the scanning. After confirming the peer is available,
- * the driver transmits the P2P frame to peer. */
- struct timer_list pre_tx_scan_timer;
- struct timer_list reset_ch_sitesurvey;
- struct timer_list reset_ch_sitesurvey2; /* Just for resetting the scan
- * limit function by using p2p nego */
- struct tx_provdisc_req_info tx_prov_disc_info;
- struct rx_provdisc_req_info rx_prov_disc_info;
- struct tx_invite_req_info invitereq_info;
- /* Store the profile information of persistent group */
- struct profile_info profileinfo[P2P_MAX_PERSISTENT_GROUP_NUM];
- struct tx_invite_resp_info inviteresp_info;
- struct tx_nego_req_info nego_req_info;
- /* Store the group id info when doing the group negot handshake. */
- struct group_id_info groupid_info;
- /* Used for get the limit scan channel from the Invitation procedure */
- struct scan_limit_info rx_invitereq_info;
- /* Used for get the limit scan chan from the P2P negotiation handshake*/
- struct scan_limit_info p2p_info;
- enum P2P_ROLE role;
- enum P2P_STATE pre_p2p_state;
- enum P2P_STATE p2p_state;
- /* The device address should be the mac address of this device. */
- u8 device_addr[ETH_ALEN];
- u8 interface_addr[ETH_ALEN];
- u8 social_chan[4];
- u8 listen_channel;
- u8 operating_channel;
- u8 listen_dwell; /* This value should be between 1 and 3 */
- u8 support_rate[8];
- u8 p2p_wildcard_ssid[P2P_WILDCARD_SSID_LEN];
- u8 intent; /* should only include the intent value. */
- u8 p2p_peer_interface_addr[ETH_ALEN];
- u8 p2p_peer_device_addr[ETH_ALEN];
- u8 peer_intent; /* Included the intent value and tie breaker value. */
- /* Device name for displaying on searching device screen */
- u8 device_name[WPS_MAX_DEVICE_NAME_LEN];
- u8 device_name_len;
- u8 profileindex; /* Used to point to the index of profileinfo array */
- u8 peer_operating_ch;
- u8 find_phase_state_exchange_cnt;
- /* The device password ID for group negotiation */
- u16 device_password_id_for_nego;
- u8 negotiation_dialog_token;
-	/* SSID information for group negotiation */
- u8 nego_ssid[WLAN_SSID_MAXLEN];
- u8 nego_ssidlen;
- u8 p2p_group_ssid[WLAN_SSID_MAXLEN];
- u8 p2p_group_ssid_len;
- /* Flag to know if the persistent function should be supported or not.*/
- u8 persistent_supported;
- /* In the Sigma test, the Sigma will provide this enable from the
- * sta_set_p2p CAPI. */
- /* 0: disable */
- /* 1: enable */
- u8 session_available; /* Flag to set the WFD session available to
- * enable or disable "by Sigma" */
- /* In the Sigma test, the Sigma will disable the session available
- * by using the sta_preset CAPI. */
- /* 0: disable */
- /* 1: enable */
-
- /* This field will store the WPS value (PIN value or PBC) that UI had
- * got from the user. */
- enum P2P_WPSINFO ui_got_wps_info;
- u16 supported_wps_cm; /* This field describes the WPS config method
- * which this driver supported. */
- /* The value should be the combination of config
- * method defined in page104 of WPS v2.0 spec.*/
- /* This field will contain the length of body of P2P Channel List
- * attribute of group negotiation response frame. */
- uint channel_list_attr_len;
- /* This field will contain the body of P2P Channel List attribute of
-	 * group negotiation response frame. */
- /* We will use the channel_cnt and channel_list fields when constructing
- * the group negotiation confirm frame. */
- u8 channel_list_attr[100];
- enum P2P_PS_MODE p2p_ps_mode; /* indicate p2p ps mode */
- enum P2P_PS_STATE p2p_ps_state; /* indicate p2p ps state */
-	u8 noa_index; /* Identifies an instance of Notice of Absence timing. */
- u8 ctwindow; /* Client traffic window. A period of time in TU after TBTT. */
- u8 opp_ps; /* opportunistic power save. */
- u8 noa_num; /* number of NoA descriptor in P2P IE. */
- u8 noa_count[P2P_MAX_NOA_NUM]; /* Count for owner, Type of client. */
- /* Max duration for owner, preferred or min acceptable duration for
- * client. */
- u32 noa_duration[P2P_MAX_NOA_NUM];
- /* Length of interval for owner, preferred or max acceptable interval
- * of client. */
- u32 noa_interval[P2P_MAX_NOA_NUM];
- /* schedule expressed in terms of the lower 4 bytes of the TSF timer. */
- u32 noa_start_time[P2P_MAX_NOA_NUM];
-};
-
-struct tdls_ss_record { /* signal strength record */
- u8 macaddr[ETH_ALEN];
- u8 RxPWDBAll;
- u8 is_tdls_sta; /* true: direct link sta, false: else */
-};
-
-struct tdls_info {
- u8 ap_prohibited;
- uint setup_state;
- u8 sta_cnt;
- u8 sta_maximum; /* 1:tdls sta is equal (NUM_STA-1), reach max direct link number; 0: else; */
- struct tdls_ss_record ss_record;
- u8 macid_index; /* macid entry that is ready to write */
- u8 clear_cam; /* cam entry that is trying to clear, using it in direct link teardown */
- u8 ch_sensing;
- u8 cur_channel;
- u8 candidate_ch;
- u8 collect_pkt_num[MAX_CHANNEL_NUM];
- spinlock_t cmd_lock;
- spinlock_t hdl_lock;
- u8 watchdog_count;
- u8 dev_discovered; /* WFD_TDLS: for sigma test */
- u8 enable;
-};
-
-struct qos_priv {
- /* bit mask option: u-apsd,
- * s-apsd, ts, block ack... */
- unsigned int qos_option;
-};
-
-struct mlme_priv {
- spinlock_t lock;
- int fw_state; /* shall we protect this variable? maybe not necessarily... */
- bool bScanInProcess;
- u8 to_join; /* flag */
- u8 to_roaming; /* roaming trying times */
-
- u8 *nic_hdl;
-
- struct list_head *pscanned;
- struct __queue free_bss_pool;
- struct __queue scanned_queue;
- u8 *free_bss_buf;
- u8 key_mask; /* use to restore wep key after hal_init */
- u32 num_of_scanned;
-
- struct ndis_802_11_ssid assoc_ssid;
- u8 assoc_bssid[6];
-
- struct wlan_network cur_network;
- struct wlan_network *cur_network_scanned;
-
- u32 scan_interval;
-
- struct timer_list assoc_timer;
-
- uint assoc_by_bssid;
- uint assoc_by_rssi;
-
- struct timer_list scan_to_timer; /* driver itself handles scan_timeout status. */
- u32 scan_start_time; /* used to evaluate the time spent in scanning */
-
- struct qos_priv qospriv;
-
- /* Number of non-HT AP/stations */
- int num_sta_no_ht;
-
- /* Number of HT AP/stations 20 MHz */
- /* int num_sta_ht_20mhz; */
-
- int num_FortyMHzIntolerant;
- struct ht_priv htpriv;
- struct rt_link_detect LinkDetectInfo;
- struct timer_list dynamic_chk_timer; /* dynamic/periodic check timer */
-
- u8 acm_mask; /* for wmm acm mask */
- u8 ChannelPlan;
- enum rt_scan_type scan_mode; /* active: 1, passive: 0 */
-
- /* u8 probereq_wpsie[MAX_WPS_IE_LEN];added in probe req */
- /* int probereq_wpsie_len; */
- u8 *wps_probe_req_ie;
- u32 wps_probe_req_ie_len;
-
- u8 *assoc_req;
- u32 assoc_req_len;
-
- /* Number of associated Non-ERP stations (i.e., stations using 802.11b
- * in 802.11g BSS) */
- int num_sta_non_erp;
-
- /* Number of associated stations that do not support Short Slot Time */
- int num_sta_no_short_slot_time;
-
- /* Number of associated stations that do not support Short Preamble */
- int num_sta_no_short_preamble;
-
- int olbc; /* Overlapping Legacy BSS Condition */
-
- /* Number of HT assoc sta that do not support greenfield */
- int num_sta_ht_no_gf;
-
- /* Number of associated non-HT stations */
- /* int num_sta_no_ht; */
-
- /* Number of HT associated stations 20 MHz */
- int num_sta_ht_20mhz;
-
- /* Overlapping BSS information */
- int olbc_ht;
-
- u16 ht_op_mode;
-
- u8 *wps_beacon_ie;
- /* u8 *wps_probe_req_ie; */
- u8 *wps_probe_resp_ie;
- u8 *wps_assoc_resp_ie;
-
- u32 wps_beacon_ie_len;
- u32 wps_probe_resp_ie_len;
- u32 wps_assoc_resp_ie_len;
-
- u8 *p2p_beacon_ie;
- u8 *p2p_probe_req_ie;
- u8 *p2p_probe_resp_ie;
- u8 *p2p_go_probe_resp_ie; /* for GO */
- u8 *p2p_assoc_req_ie;
-
- u32 p2p_beacon_ie_len;
- u32 p2p_probe_req_ie_len;
- u32 p2p_probe_resp_ie_len;
- u32 p2p_go_probe_resp_ie_len; /* for GO */
- u32 p2p_assoc_req_ie_len;
- spinlock_t bcn_update_lock;
- u8 update_bcn;
-};
-
-int hostapd_mode_init(struct adapter *padapter);
-void hostapd_mode_unload(struct adapter *padapter);
-
-extern unsigned char WPA_TKIP_CIPHER[4];
-extern unsigned char RSN_TKIP_CIPHER[4];
-extern unsigned char REALTEK_96B_IE[];
-extern unsigned char MCS_rate_2R[16];
-extern unsigned char MCS_rate_1R[16];
-
-void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf);
-void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf);
-void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf);
-void rtw_joinbss_event_callback(struct adapter *adapter, u8 *pbuf);
-void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf);
-void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf);
-void indicate_wx_scan_complete_event(struct adapter *padapter);
-void rtw_indicate_wx_assoc_event(struct adapter *padapter);
-void rtw_indicate_wx_disassoc_event(struct adapter *padapter);
-int event_thread(void *context);
-void rtw_free_network_queue(struct adapter *adapter, u8 isfreeall);
-int rtw_init_mlme_priv(struct adapter *adapter);
-void rtw_free_mlme_priv (struct mlme_priv *pmlmepriv);
-int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv);
-int rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv,
- int keyid, u8 set_tx);
-int rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv);
-
-static inline u8 *get_bssid(struct mlme_priv *pmlmepriv)
-{ /* if sta_mode:pmlmepriv->cur_network.network.MacAddress=> bssid */
- /* if adhoc_mode:pmlmepriv->cur_network.network.MacAddress=> ibss mac address */
- return pmlmepriv->cur_network.network.MacAddress;
-}
-
-static inline bool check_fwstate(struct mlme_priv *pmlmepriv, int state)
-{
- if (pmlmepriv->fw_state & state)
- return true;
-
- return false;
-}
-
-/*
- * No Limit on the calling context,
- * therefore set it to be the critical section...
- *
- * ### NOTE:#### (!!!!)
- * MUST TAKE CARE THAT BEFORE CALLING THIS FUNC, YOU SHOULD HAVE LOCKED pmlmepriv->lock
- */
-static inline void set_fwstate(struct mlme_priv *pmlmepriv, int state)
-{
- pmlmepriv->fw_state |= state;
- /* FOR HW integration */
- if (_FW_UNDER_SURVEY == state)
- pmlmepriv->bScanInProcess = true;
-}
-
-static inline void _clr_fwstate_(struct mlme_priv *pmlmepriv, int state)
-{
- pmlmepriv->fw_state &= ~state;
- /* FOR HW integration */
- if (_FW_UNDER_SURVEY == state)
- pmlmepriv->bScanInProcess = false;
-}
-
-/*
- * No Limit on the calling context,
- * therefore set it to be the critical section...
- */
-static inline void clr_fwstate(struct mlme_priv *pmlmepriv, int state)
-{
- spin_lock_bh(&pmlmepriv->lock);
- if (check_fwstate(pmlmepriv, state))
- pmlmepriv->fw_state ^= state;
- spin_unlock_bh(&pmlmepriv->lock);
-}
-
-static inline void clr_fwstate_ex(struct mlme_priv *pmlmepriv, int state)
-{
- spin_lock_bh(&pmlmepriv->lock);
- _clr_fwstate_(pmlmepriv, state);
- spin_unlock_bh(&pmlmepriv->lock);
-}
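Tying the helpers together: set_fwstate()/_clr_fwstate_() expect the caller to already hold pmlmepriv->lock, while clr_fwstate()/clr_fwstate_ex() lock internally. A minimal sketch, with a hypothetical helper name, of a locked transition into the survey state:

static void sketch_enter_survey_state(struct mlme_priv *pmlmepriv)
{
	spin_lock_bh(&pmlmepriv->lock);
	if (!check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
		set_fwstate(pmlmepriv, _FW_UNDER_SURVEY);
	spin_unlock_bh(&pmlmepriv->lock);
}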
-
-static inline void up_scanned_network(struct mlme_priv *pmlmepriv)
-{
- spin_lock_bh(&pmlmepriv->lock);
- pmlmepriv->num_of_scanned++;
- spin_unlock_bh(&pmlmepriv->lock);
-}
-
-static inline void down_scanned_network(struct mlme_priv *pmlmepriv)
-{
- spin_lock_bh(&pmlmepriv->lock);
- pmlmepriv->num_of_scanned--;
- spin_unlock_bh(&pmlmepriv->lock);
-}
-
-static inline void set_scanned_network_val(struct mlme_priv *pmlmepriv, int val)
-{
- spin_lock_bh(&pmlmepriv->lock);
- pmlmepriv->num_of_scanned = val;
- spin_unlock_bh(&pmlmepriv->lock);
-}
-
-u16 rtw_get_capability(struct wlan_bssid_ex *bss);
-void rtw_update_scanned_network(struct adapter *adapter,
- struct wlan_bssid_ex *target);
-void rtw_disconnect_hdl_under_linked(struct adapter *adapter,
- struct sta_info *psta, u8 free_assoc);
-void rtw_generate_random_ibss(u8 *pibss);
-struct wlan_network *rtw_find_network(struct __queue *scanned_queue, u8 *addr);
-struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue);
-
-void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue);
-void rtw_indicate_disconnect(struct adapter *adapter);
-void rtw_indicate_connect(struct adapter *adapter);
-void rtw_indicate_scan_done(struct adapter *padapter);
-
-int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie,
- uint in_len);
-int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie,
- uint in_len, uint initial_out_len);
-void rtw_init_registrypriv_dev_network(struct adapter *adapter);
-
-void rtw_update_registrypriv_dev_network(struct adapter *adapter);
-
-void _rtw_join_timeout_handler(struct adapter *adapter);
-void rtw_scan_timeout_handler(struct adapter *adapter);
-
-void rtw_dynamic_check_timer_handlder(struct adapter *adapter);
-
-void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv);
-
-struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv);
-
-void _rtw_free_network(struct mlme_priv *pmlmepriv,
- struct wlan_network *pnetwork, u8 isfreeall);
-
-struct wlan_network *_rtw_find_network(struct __queue *scanned_queue, u8 *addr);
-
-void _rtw_free_network_queue(struct adapter *padapter, u8 isfreeall);
-
-int rtw_if_up(struct adapter *padapter);
-
-u8 *rtw_get_capability_from_ie(u8 *ie);
-u8 *rtw_get_beacon_interval_from_ie(u8 *ie);
-
-void rtw_joinbss_reset(struct adapter *padapter);
-
-unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie,
- u8 *out_ie, uint in_len, uint *pout_len);
-void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len);
-void rtw_issue_addbareq_cmd(struct adapter *padapter,
- struct xmit_frame *pxmitframe);
-
-int rtw_is_same_ibss(struct adapter *adapter, struct wlan_network *pnetwork);
-int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst);
-
-void rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network);
-void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network);
-void rtw_set_roaming(struct adapter *adapter, u8 to_roaming);
-u8 rtw_to_roaming(struct adapter *adapter);
-
-void rtw_set_max_rpt_macid(struct adapter *adapter, u8 macid);
-void rtw_sta_media_status_rpt(struct adapter *adapter, struct sta_info *psta,
- u32 mstatus);
-
-u8 rtw_current_antenna(struct adapter *adapter);
-
-#endif /* __RTL871X_MLME_H_ */
diff --git a/drivers/staging/r8188eu/include/rtw_mlme_ext.h b/drivers/staging/r8188eu/include/rtw_mlme_ext.h
deleted file mode 100644
index 589de7c54d93..000000000000
--- a/drivers/staging/r8188eu/include/rtw_mlme_ext.h
+++ /dev/null
@@ -1,753 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTW_MLME_EXT_H_
-#define __RTW_MLME_EXT_H_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-#include "wlan_bssdef.h"
-
-/* Commented by Albert 20101105 */
-/* Increase the SURVEY_TO value from 100 to 150 ( 100ms to 150ms ) */
-/* The Realtek 8188CE SoftAP will spend around 100ms to send the probe response after receiving the probe request. */
-/* So, this driver tried to extend the dwell time for each scanning channel. */
-/* This will increase the chance to receive the probe response from SoftAP. */
-
-#define SURVEY_TO (100)
-#define REAUTH_TO (300) /* 50) */
-#define REASSOC_TO (300) /* 50) */
-/* define DISCONNECT_TO (3000) */
-#define ADDBA_TO (2000)
-
-#define LINKED_TO (1) /* unit:2 sec, 1x2=2 sec */
-
-#define REAUTH_LIMIT (4)
-#define REASSOC_LIMIT (4)
-
-#define DYNAMIC_FUNC_DISABLE (0x0)
-
-/* ====== ODM_ABILITY_E ======== */
-/* BB ODM section BIT 0-15 */
-#define DYNAMIC_BB_DIG BIT(0)
-
-#define DYNAMIC_ALL_FUNC_ENABLE 0xFFFFFFF
-
-#define _HW_STATE_NOLINK_ 0x00
-#define _HW_STATE_ADHOC_ 0x01
-#define _HW_STATE_STATION_ 0x02
-#define _HW_STATE_AP_ 0x03
-
-#define _1M_RATE_ 0
-#define _2M_RATE_ 1
-#define _5M_RATE_ 2
-#define _11M_RATE_ 3
-#define _6M_RATE_ 4
-#define _9M_RATE_ 5
-#define _12M_RATE_ 6
-#define _18M_RATE_ 7
-#define _24M_RATE_ 8
-#define _36M_RATE_ 9
-#define _48M_RATE_ 10
-#define _54M_RATE_ 11
-
-extern unsigned char RTW_WPA_OUI[];
-extern unsigned char WMM_OUI[];
-extern unsigned char WPS_OUI[];
-extern unsigned char WFD_OUI[];
-extern unsigned char P2P_OUI[];
-
-extern unsigned char WMM_INFO_OUI[];
-extern unsigned char WMM_PARA_OUI[];
-
-/* Channel Plan Type. */
-/* Note: */
-/* We only add a new channel plan when it differs from all of the
- * following channel plans. */
-/* If you just want to customize the actions (scan period or join actions)
- * of one of the channel plans, */
-/* customize them in struct rt_channel_info in the RT_CHANNEL_LIST. */
-enum RT_CHANNEL_DOMAIN {
- /* old channel plan mapping ===== */
- RT_CHANNEL_DOMAIN_FCC = 0x00,
- RT_CHANNEL_DOMAIN_IC = 0x01,
- RT_CHANNEL_DOMAIN_ETSI = 0x02,
- RT_CHANNEL_DOMAIN_SPAIN = 0x03,
- RT_CHANNEL_DOMAIN_FRANCE = 0x04,
- RT_CHANNEL_DOMAIN_MKK = 0x05,
- RT_CHANNEL_DOMAIN_MKK1 = 0x06,
- RT_CHANNEL_DOMAIN_ISRAEL = 0x07,
- RT_CHANNEL_DOMAIN_TELEC = 0x08,
- RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN = 0x09,
- RT_CHANNEL_DOMAIN_WORLD_WIDE_13 = 0x0A,
- RT_CHANNEL_DOMAIN_TAIWAN = 0x0B,
- RT_CHANNEL_DOMAIN_CHINA = 0x0C,
- RT_CHANNEL_DOMAIN_SINGAPORE_INDIA_MEXICO = 0x0D,
- RT_CHANNEL_DOMAIN_KOREA = 0x0E,
- RT_CHANNEL_DOMAIN_TURKEY = 0x0F,
- RT_CHANNEL_DOMAIN_JAPAN = 0x10,
- RT_CHANNEL_DOMAIN_FCC_NO_DFS = 0x11,
- RT_CHANNEL_DOMAIN_JAPAN_NO_DFS = 0x12,
- RT_CHANNEL_DOMAIN_TAIWAN_NO_DFS = 0x14,
-
- /* new channel plan mapping, (2GDOMAIN_5GDOMAIN) ===== */
- RT_CHANNEL_DOMAIN_WORLD_NULL = 0x20,
- RT_CHANNEL_DOMAIN_ETSI1_NULL = 0x21,
- RT_CHANNEL_DOMAIN_FCC1_NULL = 0x22,
- RT_CHANNEL_DOMAIN_MKK1_NULL = 0x23,
- RT_CHANNEL_DOMAIN_ETSI2_NULL = 0x24,
- RT_CHANNEL_DOMAIN_FCC1_FCC1 = 0x25,
- RT_CHANNEL_DOMAIN_WORLD_ETSI1 = 0x26,
- RT_CHANNEL_DOMAIN_MKK1_MKK1 = 0x27,
- RT_CHANNEL_DOMAIN_WORLD_KCC1 = 0x28,
- RT_CHANNEL_DOMAIN_WORLD_FCC2 = 0x29,
- RT_CHANNEL_DOMAIN_WORLD_FCC3 = 0x30,
- RT_CHANNEL_DOMAIN_WORLD_FCC4 = 0x31,
- RT_CHANNEL_DOMAIN_WORLD_FCC5 = 0x32,
- RT_CHANNEL_DOMAIN_WORLD_FCC6 = 0x33,
- RT_CHANNEL_DOMAIN_FCC1_FCC7 = 0x34,
- RT_CHANNEL_DOMAIN_WORLD_ETSI2 = 0x35,
- RT_CHANNEL_DOMAIN_WORLD_ETSI3 = 0x36,
- RT_CHANNEL_DOMAIN_MKK1_MKK2 = 0x37,
- RT_CHANNEL_DOMAIN_MKK1_MKK3 = 0x38,
- RT_CHANNEL_DOMAIN_FCC1_NCC1 = 0x39,
- RT_CHANNEL_DOMAIN_FCC1_NCC2 = 0x40,
- RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G = 0x41,
- /* Add new channel plan above this line=============== */
- RT_CHANNEL_DOMAIN_MAX,
- RT_CHANNEL_DOMAIN_REALTEK_DEFINE = 0x7F,
-};
-
-enum RT_CHANNEL_DOMAIN_2G {
- RT_CHANNEL_DOMAIN_2G_WORLD = 0x00, /* Worldwide 13 */
- RT_CHANNEL_DOMAIN_2G_ETSI1 = 0x01, /* Europe */
- RT_CHANNEL_DOMAIN_2G_FCC1 = 0x02, /* US */
- RT_CHANNEL_DOMAIN_2G_MKK1 = 0x03, /* Japan */
- RT_CHANNEL_DOMAIN_2G_ETSI2 = 0x04, /* France */
- RT_CHANNEL_DOMAIN_2G_NULL = 0x05,
- /* Add new channel plan above this line=============== */
- RT_CHANNEL_DOMAIN_2G_MAX,
-};
-
-#define rtw_is_channel_plan_valid(chplan) \
- (chplan < RT_CHANNEL_DOMAIN_MAX || \
- chplan == RT_CHANNEL_DOMAIN_REALTEK_DEFINE)
-
-struct rt_channel_plan {
- unsigned char Channel[MAX_CHANNEL_NUM];
- unsigned char Len;
-};
-
-struct rt_channel_plan_map {
- unsigned char Index2G;
-};
-
-enum Associated_AP {
- atherosAP = 0,
- broadcomAP = 1,
- ciscoAP = 2,
- marvellAP = 3,
- ralinkAP = 4,
- realtekAP = 5,
- airgocapAP = 6,
- unknownAP = 7,
- maxAP,
-};
-
-enum HT_IOT_PEER {
- HT_IOT_PEER_UNKNOWN = 0,
- HT_IOT_PEER_REALTEK = 1,
- HT_IOT_PEER_REALTEK_92SE = 2,
- HT_IOT_PEER_BROADCOM = 3,
- HT_IOT_PEER_RALINK = 4,
- HT_IOT_PEER_ATHEROS = 5,
- HT_IOT_PEER_CISCO = 6,
- HT_IOT_PEER_MERU = 7,
- HT_IOT_PEER_MARVELL = 8,
- HT_IOT_PEER_REALTEK_SOFTAP = 9,/* peer is RealTek SOFT_AP */
- HT_IOT_PEER_SELF_SOFTAP = 10, /* Self is SoftAP */
- HT_IOT_PEER_AIRGO = 11,
- HT_IOT_PEER_INTEL = 12,
- HT_IOT_PEER_RTK_APCLIENT = 13,
- HT_IOT_PEER_REALTEK_81XX = 14,
- HT_IOT_PEER_REALTEK_WOW = 15,
- HT_IOT_PEER_TENDA = 16,
- HT_IOT_PEER_MAX = 17
-};
-
-enum SCAN_STATE {
- SCAN_DISABLE = 0,
- SCAN_START = 1,
- SCAN_TXNULL = 2,
- SCAN_PROCESS = 3,
- SCAN_COMPLETE = 4,
- SCAN_STATE_MAX,
-};
-
-typedef void (*mlme_handler)(struct adapter *adapt, struct recv_frame *frame);
-
-struct ss_res {
- int state;
- int bss_cnt;
- int channel_idx;
- int scan_mode;
- u8 ssid_num;
- u8 ch_num;
- struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT];
- struct rtw_ieee80211_channel ch[RTW_CHANNEL_SCAN_AMOUNT];
-};
-
-/* define AP_MODE 0x0C */
-/* define STATION_MODE 0x08 */
-/* define AD_HOC_MODE 0x04 */
-/* define NO_LINK_MODE 0x00 */
-
-#define WIFI_FW_NULL_STATE _HW_STATE_NOLINK_
-#define WIFI_FW_STATION_STATE _HW_STATE_STATION_
-#define WIFI_FW_AP_STATE _HW_STATE_AP_
-#define WIFI_FW_ADHOC_STATE _HW_STATE_ADHOC_
-
-#define WIFI_FW_AUTH_NULL 0x00000100
-#define WIFI_FW_AUTH_STATE 0x00000200
-#define WIFI_FW_AUTH_SUCCESS 0x00000400
-
-#define WIFI_FW_ASSOC_STATE 0x00002000
-#define WIFI_FW_ASSOC_SUCCESS 0x00004000
-
-#define WIFI_FW_LINKING_STATE (WIFI_FW_AUTH_NULL | \
- WIFI_FW_AUTH_STATE | \
- WIFI_FW_AUTH_SUCCESS | \
- WIFI_FW_ASSOC_STATE)
-
-struct FW_Sta_Info {
- struct sta_info *psta;
- u32 status;
- u32 rx_pkt;
- u32 retry;
- unsigned char SupportedRates[NDIS_802_11_LENGTH_RATES_EX];
-};
-
-/*
- * Usage:
- * When one interface acts as an AP and another interface is a scanning STA,
- * the driver should periodically switch back to the AP's operating channel.
- * Parameters:
- * After scanning RTW_SCAN_NUM_OF_CH channels, the driver switches back to
- * the AP's operating channel for
- * RTW_STAY_AP_CH_MS * SURVEY_TO milliseconds.
- * Example:
- * For a chip supporting 2.4 GHz + 5 GHz with AP mode operating on channel 1,
- * RTW_SCAN_NUM_OF_CH is 8, RTW_STAY_AP_CH_MS is 3 and SURVEY_TO is 100.
- * When the STA mode interface gets a set_scan command, it will
- * 1. Scan channels 1, 2, 3, 4, 5, 6, 7, 8
- * 2. Return to channel 1 for 300 milliseconds
- * 3. Survey channels 9, 10, 11, 36, 40, 44, 48, 52
- * 4. Return to channel 1 for 300 milliseconds
- * 5. ... and so on, until the survey is done.
- */
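/*
 * A minimal sketch of the timing described above. It assumes the
 * RTW_SCAN_NUM_OF_CH and RTW_STAY_AP_CH_MS macros referenced in the
 * comment (neither is defined in this header), and the
 * stay_on_ap_ch_ms() helper is hypothetical; the values are purely
 * illustrative.
 */
#if 0	/* illustration only */
#define RTW_SCAN_NUM_OF_CH	8
#define RTW_STAY_AP_CH_MS	3

/* Milliseconds spent back on the AP's channel between scan bursts:
 * 3 * 100 = 300 ms with the example values above. */
static inline unsigned int stay_on_ap_ch_ms(void)
{
	return RTW_STAY_AP_CH_MS * SURVEY_TO;
}
#endif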
-
-struct mlme_ext_info {
- u32 state;
- u32 reauth_count;
- u32 reassoc_count;
- u32 link_count;
- u32 auth_seq;
- u32 auth_algo; /* 802.11 auth, could be open, shared, auto */
- u32 authModeToggle;
- u32 enc_algo;/* encrypt algorithm; */
- u32 key_index; /* this is only valid for legacy wep,
- * 0~3 for key id. */
- u32 iv;
- u8 chg_txt[128];
- u16 aid;
- u16 bcn_interval;
- u16 capability;
- u8 assoc_AP_vendor;
- u8 slotTime;
- u8 preamble_mode;
- u8 WMM_enable;
- u8 ERP_enable;
- u8 ERP_IE;
- u8 HT_enable;
- u8 HT_caps_enable;
- u8 HT_info_enable;
- u8 HT_protection;
- u8 turboMode_cts2self;
- u8 turboMode_rtsen;
- u8 SM_PS;
- u8 agg_enable_bitmap;
- u8 ADDBA_retry_count;
- u8 candidate_tid_bitmap;
- u8 dialogToken;
- /* Accept ADDBA Request */
- bool bAcceptAddbaReq;
- u8 bwmode_updated;
- u8 hidden_ssid_mode;
-
- struct WMM_para_element WMM_param;
- struct HT_caps_element HT_caps;
- struct HT_info_element HT_info;
- struct wlan_bssid_ex network;/* join network or bss_network,
- * if in ap mode, it is the same
- * as cur_network.network */
- struct FW_Sta_Info FW_sta_info[NUM_STA];
-};
-
-/* The channel information about this channel including joining,
- * scanning, and power constraints. */
-struct rt_channel_info {
- u8 ChannelNum; /* The channel number. */
- enum rt_scan_type ScanType; /* Scan type such as passive
- * or active scan. */
- u32 rx_count;
-};
-
-int rtw_ch_set_search_ch(struct rt_channel_info *ch_set, const u32 ch);
-
-/* P2P_MAX_REG_CLASSES - Maximum number of regulatory classes */
-#define P2P_MAX_REG_CLASSES 10
-
-/* P2P_MAX_REG_CLASS_CHANNELS - Maximum number of chan per regulatory class */
-#define P2P_MAX_REG_CLASS_CHANNELS 20
-
-/* struct p2p_channels - List of supported channels */
-struct p2p_channels {
- /* struct p2p_reg_class - Supported regulatory class */
- struct p2p_reg_class {
- /* reg_class - Regulatory class (IEEE 802.11-2007, Annex J) */
- u8 reg_class;
-
- /* channel - Supported channels */
- u8 channel[P2P_MAX_REG_CLASS_CHANNELS];
-
- /* channels - Number of channel entries in use */
- size_t channels;
- } reg_class[P2P_MAX_REG_CLASSES];
-
- /* reg_classes - Number of reg_class entries in use */
- size_t reg_classes;
-};
-
-struct p2p_oper_class_map {
- enum hw_mode {IEEE80211G} mode;
- u8 op_class;
- u8 min_chan;
- u8 max_chan;
- u8 inc;
- enum {BW20, BW40PLUS, BW40MINUS} bw;
-};
-
-struct mlme_ext_priv {
- struct adapter *padapter;
- u8 mlmeext_init;
- atomic_t event_seq;
- u16 mgnt_seq;
-
- unsigned char cur_channel;
- unsigned char cur_bwmode;
- unsigned char cur_ch_offset;/* PRIME_CHNL_OFFSET */
- unsigned char cur_wireless_mode; /* NETWORK_TYPE */
-
- unsigned char oper_channel; /* saved chan info when call
- * set_channel_bw */
- unsigned char oper_bwmode;
- unsigned char oper_ch_offset;/* PRIME_CHNL_OFFSET */
-
- unsigned char max_chan_nums;
- struct rt_channel_info channel_set[MAX_CHANNEL_NUM];
- struct p2p_channels channel_list;
- unsigned char basicrate[NumRates];
- unsigned char datarate[NumRates];
-
- struct ss_res sitesurvey_res;
- struct mlme_ext_info mlmext_info;/* for sta/adhoc mode, including
- * current scan/connecting/connected
- * related info. For ap mode,
- * network includes ap's cap_info*/
- struct timer_list survey_timer;
- struct timer_list link_timer;
- u16 chan_scan_time;
-
- u8 scan_abort;
- u8 tx_rate; /* TXRATE when USERATE is set. */
-
- u32 retry; /* retry for issue probereq */
-
- u64 TSFValue;
-
- unsigned char bstart_bss;
- u8 update_channel_plan_by_ap_done;
- /* recv_decache check for Action_public frame */
- u8 action_public_dialog_token;
- u16 action_public_rxseq;
- u8 active_keep_alive_check;
-};
-
-void init_mlme_ext_priv(struct adapter *adapter);
-int init_hw_mlme_ext(struct adapter *padapter);
-void free_mlme_ext_priv (struct mlme_ext_priv *pmlmeext);
-struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv);
-
-unsigned char networktype_to_raid(unsigned char network_type);
-u8 judge_network_type(struct adapter *padapter, unsigned char *rate, int len);
-void get_rate_set(struct adapter *padapter, unsigned char *pbssrate, int *len);
-
-void Save_DM_Func_Flag(struct adapter *padapter);
-void Restore_DM_Func_Flag(struct adapter *padapter);
-
-void Set_MSR(struct adapter *padapter, u8 type);
-
-u8 rtw_get_oper_ch(struct adapter *adapter);
-void rtw_set_oper_ch(struct adapter *adapter, u8 ch);
-void rtw_set_oper_bw(struct adapter *adapter, u8 bw);
-void rtw_set_oper_choffset(struct adapter *adapter, u8 offset);
-
-void set_channel_bwmode(struct adapter *padapter, unsigned char channel,
- unsigned char channel_offset, unsigned short bwmode);
-void SelectChannel(struct adapter *padapter, unsigned char channel);
-void SetBWMode(struct adapter *padapter, unsigned short bwmode,
- unsigned char channel_offset);
-
-unsigned int decide_wait_for_beacon_timeout(unsigned int bcn_interval);
-
-void write_cam(struct adapter *padapter, u8 entry, u16 ctrl, u8 *mac, u8 *key);
-void clear_cam_entry(struct adapter *padapter, u8 entry);
-
-void invalidate_cam_all(struct adapter *padapter);
-
-int allocate_fw_sta_entry(struct adapter *padapter);
-void flush_all_cam_entry(struct adapter *padapter);
-
-void rtw_mlme_under_site_survey(struct adapter *adapter);
-void rtw_mlme_site_survey_done(struct adapter *adapter);
-
-void site_survey(struct adapter *padapter);
-u8 collect_bss_info(struct adapter *padapter, struct recv_frame *precv_frame,
- struct wlan_bssid_ex *bssid);
-void update_network(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
- struct adapter *adapter, bool update_ie);
-
-u8 *get_my_bssid(struct wlan_bssid_ex *pnetwork);
-u16 get_beacon_interval(struct wlan_bssid_ex *bss);
-
-bool r8188eu_is_client_associated_to_ap(struct adapter *padapter);
-bool r8188eu_is_client_associated_to_ibss(struct adapter *padapter);
-bool r8188eu_is_ibss_empty(struct adapter *padapter);
-
-unsigned char check_assoc_AP(u8 *pframe, uint len);
-
-int WMM_param_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE);
-void WMMOnAssocRsp(struct adapter *padapter);
-
-void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE);
-void HT_info_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE);
-void HTOnAssocRsp(struct adapter *padapter);
-
-void ERP_IE_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE);
-void VCS_update(struct adapter *padapter, struct sta_info *psta);
-
-void update_beacon_info(struct adapter *padapter, u8 *ie_ptr, uint ie_len, struct sta_info *psta);
-int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len);
-void update_IOT_info(struct adapter *padapter);
-void update_capinfo(struct adapter *adapter, u16 updatecap);
-void update_wireless_mode(struct adapter *padapter);
-void rtw_set_basic_rate(struct adapter *adapter, u8 *rates);
-void update_tx_basic_rate(struct adapter *padapter, u8 modulation);
-void update_bmc_sta_support_rate(struct adapter *padapter, u32 mac_id);
-int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie,
- uint var_ie_len, int cam_idx);
-
-/* for sta/adhoc mode */
-void update_sta_info(struct adapter *padapter, struct sta_info *psta);
-unsigned int update_basic_rate(unsigned char *ptn, unsigned int ptn_sz);
-unsigned int update_supported_rate(unsigned char *ptn, unsigned int ptn_sz);
-unsigned int update_MSC_rate(struct HT_caps_element *pHT_caps);
-void Update_RA_Entry(struct adapter *padapter, u32 mac_id);
-void set_sta_rate(struct adapter *padapter, struct sta_info *psta);
-
-void receive_disconnect(struct adapter *padapter, unsigned char *macaddr, unsigned short reason);
-
-unsigned char get_highest_rate_idx(u32 mask);
-int support_short_GI(struct adapter *padapter, struct HT_caps_element *caps);
-bool is_ap_in_tkip(struct adapter *padapter);
-
-void report_join_res(struct adapter *padapter, int res);
-void report_survey_event(struct adapter *padapter, struct recv_frame *precv_frame);
-void report_surveydone_event(struct adapter *padapter);
-void report_del_sta_event(struct adapter *padapter,
- unsigned char *addr, unsigned short reason);
-void report_add_sta_event(struct adapter *padapter, unsigned char *addr,
- int cam_idx);
-
-void beacon_timing_control(struct adapter *padapter);
-u8 set_tx_beacon_cmd(struct adapter *padapter);
-unsigned int setup_beacon_frame(struct adapter *padapter,
- unsigned char *beacon_frame);
-void update_mgnt_tx_rate(struct adapter *padapter, u8 rate);
-void update_mgntframe_attrib(struct adapter *padapter,
- struct pkt_attrib *pattrib);
-void dump_mgntframe(struct adapter *padapter, struct xmit_frame *pmgntframe);
-s32 dump_mgntframe_and_wait(struct adapter *padapter,
- struct xmit_frame *pmgntframe, int timeout_ms);
-s32 dump_mgntframe_and_wait_ack(struct adapter *padapter,
- struct xmit_frame *pmgntframe);
-
-void issue_probersp_p2p(struct adapter *padapter, unsigned char *da);
-void issue_p2p_provision_request(struct adapter *padapter, u8 *pssid,
- u8 ussidlen, u8 *pdev_raddr);
-void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr);
-void issue_probereq_p2p(struct adapter *padapter);
-void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr,
- u8 dialogToken, u8 success);
-void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr);
-void issue_beacon(struct adapter *padapter, int timeout_ms);
-void issue_probersp(struct adapter *padapter, unsigned char *da,
- u8 is_valid_p2p_probereq);
-void issue_assocreq(struct adapter *padapter);
-void issue_asocrsp(struct adapter *padapter, unsigned short status,
- struct sta_info *pstat, int pkt_type);
-void issue_auth(struct adapter *padapter, struct sta_info *psta,
- unsigned short status);
-void issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid,
- u8 *da);
-void issue_probereq_ex(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da);
-int issue_nulldata(struct adapter *padapter, unsigned char *da,
- unsigned int power_mode, int try_cnt, int wait_ms);
-int issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
- u16 tid, int try_cnt, int wait_ms);
-int issue_deauth(struct adapter *padapter, unsigned char *da,
- unsigned short reason);
-int issue_deauth_ex(struct adapter *padapter, u8 *da, unsigned short reason,
- int try_cnt, int wait_ms);
-void issue_action_BA(struct adapter *padapter, unsigned char *raddr, u8 action,
- u16 status, struct ieee80211_mgmt *mgmt_req);
-unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr);
-unsigned int send_beacon(struct adapter *padapter);
-bool get_beacon_valid_bit(struct adapter *adapter);
-void clear_beacon_valid_bit(struct adapter *adapter);
-void rtw_resume_tx_beacon(struct adapter *adapt);
-void rtw_stop_tx_beacon(struct adapter *adapt);
-
-void start_clnt_assoc(struct adapter *padapter);
-void start_clnt_auth(struct adapter *padapter);
-void start_clnt_join(struct adapter *padapter);
-void start_create_ibss(struct adapter *padapter);
-
-void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res);
-void mlmeext_sta_del_event_callback(struct adapter *padapter);
-void mlmeext_sta_add_event_callback(struct adapter *padapter,
- struct sta_info *psta);
-
-void linked_status_chk(struct adapter *padapter);
-
-void survey_timer_hdl (struct adapter *padapter);
-void link_timer_hdl (struct adapter *padapter);
-void addba_timer_hdl(struct sta_info *psta);
-
-#define set_survey_timer(mlmeext, ms) \
- do { \
- _set_timer(&(mlmeext)->survey_timer, (ms)); \
- } while (0)
-
-#define set_link_timer(mlmeext, ms) \
- do { \
- _set_timer(&(mlmeext)->link_timer, (ms)); \
- } while (0)
-
-bool cckrates_included(unsigned char *rate, int ratelen);
-bool cckratesonly_included(unsigned char *rate, int ratelen);
-
-struct cmd_hdl {
- uint parmsize;
- u8 (*h2cfuns)(struct adapter *padapter, u8 *pbuf);
-};
-
-u8 read_macreg_hdl(struct adapter *padapter, u8 *pbuf);
-u8 write_macreg_hdl(struct adapter *padapter, u8 *pbuf);
-u8 read_bbreg_hdl(struct adapter *padapter, u8 *pbuf);
-u8 write_bbreg_hdl(struct adapter *padapter, u8 *pbuf);
-u8 read_rfreg_hdl(struct adapter *padapter, u8 *pbuf);
-u8 write_rfreg_hdl(struct adapter *padapter, u8 *pbuf);
-u8 NULL_hdl(struct adapter *padapter, u8 *pbuf);
-u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf);
-u8 disconnect_hdl(struct adapter *padapter, u8 *pbuf);
-u8 createbss_hdl(struct adapter *padapter, u8 *pbuf);
-u8 setopmode_hdl(struct adapter *padapter, u8 *pbuf);
-u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf);
-u8 setauth_hdl(struct adapter *padapter, u8 *pbuf);
-u8 setkey_hdl(struct adapter *padapter, u8 *pbuf);
-u8 set_stakey_hdl(struct adapter *padapter, u8 *pbuf);
-u8 set_assocsta_hdl(struct adapter *padapter, u8 *pbuf);
-u8 del_assocsta_hdl(struct adapter *padapter, u8 *pbuf);
-u8 add_ba_hdl(struct adapter *padapter, unsigned char *pbuf);
-
-u8 mlme_evt_hdl(struct adapter *padapter, unsigned char *pbuf);
-u8 h2c_msg_hdl(struct adapter *padapter, unsigned char *pbuf);
-u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf);
-u8 set_ch_hdl(struct adapter *padapter, u8 *pbuf);
-u8 set_chplan_hdl(struct adapter *padapter, unsigned char *pbuf);
-u8 led_blink_hdl(struct adapter *padapter, unsigned char *pbuf);
-/* Handling DFS channel switch announcement ie. */
-u8 set_csa_hdl(struct adapter *padapter, unsigned char *pbuf);
-u8 tdls_hdl(struct adapter *padapter, unsigned char *pbuf);
-
-#define GEN_DRV_CMD_HANDLER(size, cmd) {size, &cmd ## _hdl},
-#define GEN_MLME_EXT_HANDLER(size, cmd) {size, cmd},
-
-#ifdef _RTW_CMD_C_
-
-static struct cmd_hdl wlancmds[] = {
- GEN_DRV_CMD_HANDLER(0, NULL) /*0*/
- GEN_DRV_CMD_HANDLER(0, NULL)
- GEN_DRV_CMD_HANDLER(0, NULL)
- GEN_DRV_CMD_HANDLER(0, NULL)
- GEN_DRV_CMD_HANDLER(0, NULL)
- GEN_DRV_CMD_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL) /*10*/
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct joinbss_parm), join_cmd_hdl) /*14*/
- GEN_MLME_EXT_HANDLER(sizeof (struct disconnect_parm), disconnect_hdl)
- GEN_MLME_EXT_HANDLER(sizeof (struct createbss_parm), createbss_hdl)
- GEN_MLME_EXT_HANDLER(sizeof (struct setopmode_parm), setopmode_hdl)
- GEN_MLME_EXT_HANDLER(sizeof (struct sitesurvey_parm),
- sitesurvey_cmd_hdl) /*18*/
- GEN_MLME_EXT_HANDLER(sizeof (struct setauth_parm), setauth_hdl)
- GEN_MLME_EXT_HANDLER(sizeof (struct setkey_parm), setkey_hdl) /*20*/
- GEN_MLME_EXT_HANDLER(sizeof (struct set_stakey_parm), set_stakey_hdl)
- GEN_MLME_EXT_HANDLER(sizeof (struct set_assocsta_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct del_assocsta_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct setstapwrstate_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct setbasicrate_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct getbasicrate_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct setdatarate_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct getdatarate_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct setphyinfo_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct getphyinfo_parm), NULL) /*30*/
- GEN_MLME_EXT_HANDLER(sizeof (struct setphy_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct getphy_parm), NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL) /*40*/
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(sizeof(struct addBaReq_parm), add_ba_hdl)
- GEN_MLME_EXT_HANDLER(sizeof(struct set_ch_parm), set_ch_hdl) /* 46 */
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL) /*50*/
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(sizeof(struct Tx_Beacon_param),
- tx_beacon_hdl) /*55*/
-
- GEN_MLME_EXT_HANDLER(0, mlme_evt_hdl) /*56*/
- GEN_MLME_EXT_HANDLER(0, rtw_drvextra_cmd_hdl) /*57*/
-
- GEN_MLME_EXT_HANDLER(0, h2c_msg_hdl) /*58*/
- GEN_MLME_EXT_HANDLER(sizeof(struct SetChannelPlan_param),
- set_chplan_hdl) /*59*/
- GEN_MLME_EXT_HANDLER(sizeof(struct LedBlink_param),
- led_blink_hdl) /*60*/
-
- GEN_MLME_EXT_HANDLER(sizeof(struct SetChannelSwitch_param),
- set_csa_hdl) /*61*/
- GEN_MLME_EXT_HANDLER(sizeof(struct TDLSoption_param),
- tdls_hdl) /*62*/
-};
-
-#endif
-
-struct C2HEvent_Header {
-#ifdef __LITTLE_ENDIAN
- unsigned int len:16;
- unsigned int ID:8;
- unsigned int seq:8;
-#elif defined(__BIG_ENDIAN)
- unsigned int seq:8;
- unsigned int ID:8;
- unsigned int len:16;
-#endif
- unsigned int rsvd;
-};
-
-enum rtw_c2h_event {
- GEN_EVT_CODE(_Read_MACREG) = 0, /*0*/
- GEN_EVT_CODE(_Read_BBREG),
- GEN_EVT_CODE(_Read_RFREG),
- GEN_EVT_CODE(_Read_EEPROM),
- GEN_EVT_CODE(_Read_EFUSE),
- GEN_EVT_CODE(_Read_CAM), /*5*/
- GEN_EVT_CODE(_Get_BasicRate),
- GEN_EVT_CODE(_Get_DataRate),
- GEN_EVT_CODE(_Survey), /*8*/
- GEN_EVT_CODE(_SurveyDone), /*9*/
-
- GEN_EVT_CODE(_JoinBss), /*10*/
- GEN_EVT_CODE(_AddSTA),
- GEN_EVT_CODE(_DelSTA),
- GEN_EVT_CODE(_AtimDone),
- GEN_EVT_CODE(_TX_Report),
- GEN_EVT_CODE(_CCX_Report), /*15*/
- GEN_EVT_CODE(_DTM_Report),
- GEN_EVT_CODE(_TX_Rate_Statistics),
- GEN_EVT_CODE(_C2HLBK),
- GEN_EVT_CODE(_FWDBG),
- GEN_EVT_CODE(_C2HFEEDBACK), /*20*/
- GEN_EVT_CODE(_ADDBA),
- GEN_EVT_CODE(_C2HBCN),
- GEN_EVT_CODE(_ReportPwrState), /* filen: only for PCIE, USB */
- GEN_EVT_CODE(_CloseRF), /* filen: only for PCIE,
- * work around ASPM */
- MAX_C2HEVT
-};
-
-#ifdef _RTW_MLME_EXT_C_
-
-static struct fwevent wlanevents[] = {
- {0, NULL}, /*0*/
- {0, NULL},
- {0, NULL},
- {0, NULL},
- {0, NULL},
- {0, NULL},
- {0, NULL},
- {0, NULL},
- {0, &rtw_survey_event_callback}, /*8*/
- {sizeof (struct surveydone_event), &rtw_surveydone_event_callback},/*9*/
- {0, &rtw_joinbss_event_callback}, /*10*/
- {sizeof(struct stassoc_event), &rtw_stassoc_event_callback},
- {sizeof(struct stadel_event), &rtw_stadel_event_callback},
- {0, NULL},
- {0, NULL},
- {0, NULL}, /*15*/
- {0, NULL},
- {0, NULL},
- {0, NULL},
- {0, NULL},
- {0, NULL}, /*20*/
- {0, NULL},
- {0, NULL},
- {0, NULL},
- {0, NULL},
-};
-
-#endif/* _RTL_MLME_EXT_C_ */
-
-#endif /* __RTW_MLME_EXT_H_ */
diff --git a/drivers/staging/r8188eu/include/rtw_p2p.h b/drivers/staging/r8188eu/include/rtw_p2p.h
deleted file mode 100644
index b91322a1fe10..000000000000
--- a/drivers/staging/r8188eu/include/rtw_p2p.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTW_P2P_H_
-#define __RTW_P2P_H_
-
-#include "drv_types.h"
-
-u32 build_beacon_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf);
-u32 build_probe_resp_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf);
-u32 build_prov_disc_request_p2p_ie(struct wifidirect_info *pwdinfo,
- u8 *pbuf, u8 *pssid, u8 ussidlen,
- u8 *pdev_raddr);
-u32 build_assoc_resp_p2p_ie(struct wifidirect_info *pwdinfo,
- u8 *pbuf, u8 status_code);
-u32 process_probe_req_p2p_ie(struct wifidirect_info *pwdinfo,
- u8 *pframe, uint len);
-u32 process_assoc_req_p2p_ie(struct wifidirect_info *pwdinfo,
- u8 *pframe, uint len, struct sta_info *psta);
-u32 process_p2p_devdisc_req(struct wifidirect_info *pwdinfo,
- u8 *pframe, uint len);
-u32 process_p2p_devdisc_resp(struct wifidirect_info *pwdinfo,
- u8 *pframe, uint len);
-u8 process_p2p_provdisc_req(struct wifidirect_info *pwdinfo,
- u8 *pframe, uint len);
-u8 process_p2p_provdisc_resp(struct wifidirect_info *pwdinfo, u8 *pframe);
-u8 process_p2p_group_negotation_req(struct wifidirect_info *pwdinfo,
- u8 *pframe, uint len);
-u8 process_p2p_group_negotation_resp(struct wifidirect_info *pwdinfo,
- u8 *pframe, uint len);
-u8 process_p2p_group_negotation_confirm(struct wifidirect_info *pwdinfo,
- u8 *pframe, uint len);
-u8 process_p2p_presence_req(struct wifidirect_info *pwdinfo, u8 *pframe,
- uint len);
-void p2p_protocol_wk_hdl(struct adapter *padapter, int intcmdtype);
-void process_p2p_ps_ie(struct adapter *padapter, u8 *ies, u32 ielength);
-void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state);
-u8 p2p_ps_wk_cmd(struct adapter *padapter, u8 p2p_ps_state, u8 enqueue);
-void reset_global_wifidirect_info(struct adapter *padapter);
-int rtw_init_wifi_display_info(struct adapter *padapter);
-void rtw_init_wifidirect_timers(struct adapter *padapter);
-void rtw_init_wifidirect_addrs(struct adapter *padapter, u8 *dev_addr,
- u8 *iface_addr);
-void init_wifidirect_info(struct adapter *padapter, enum P2P_ROLE role);
-int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role);
-
-static inline void _rtw_p2p_set_state(struct wifidirect_info *wdinfo,
- enum P2P_STATE state)
-{
- if (wdinfo->p2p_state != state)
- wdinfo->p2p_state = state;
-}
-
-static inline void _rtw_p2p_set_pre_state(struct wifidirect_info *wdinfo,
- enum P2P_STATE state)
-{
- if (wdinfo->pre_p2p_state != state)
- wdinfo->pre_p2p_state = state;
-}
-
-static inline void _rtw_p2p_set_role(struct wifidirect_info *wdinfo,
- enum P2P_ROLE role)
-{
- if (wdinfo->role != role)
- wdinfo->role = role;
-}
-
-static inline int _rtw_p2p_state(struct wifidirect_info *wdinfo)
-{
- return wdinfo->p2p_state;
-}
-
-static inline int _rtw_p2p_pre_state(struct wifidirect_info *wdinfo)
-{
- return wdinfo->pre_p2p_state;
-}
-
-static inline int _rtw_p2p_role(struct wifidirect_info *wdinfo)
-{
- return wdinfo->role;
-}
-
-static inline bool _rtw_p2p_chk_state(struct wifidirect_info *wdinfo,
- enum P2P_STATE state)
-{
- return wdinfo->p2p_state == state;
-}
-
-static inline bool _rtw_p2p_chk_role(struct wifidirect_info *wdinfo,
- enum P2P_ROLE role)
-{
- return wdinfo->role == role;
-}
-
-#define rtw_p2p_set_state(wdinfo, state) _rtw_p2p_set_state(wdinfo, state)
-#define rtw_p2p_set_pre_state(wdinfo, state) \
- _rtw_p2p_set_pre_state(wdinfo, state)
-#define rtw_p2p_set_role(wdinfo, role) _rtw_p2p_set_role(wdinfo, role)
-
-#define rtw_p2p_state(wdinfo) _rtw_p2p_state(wdinfo)
-#define rtw_p2p_pre_state(wdinfo) _rtw_p2p_pre_state(wdinfo)
-#define rtw_p2p_role(wdinfo) _rtw_p2p_role(wdinfo)
-#define rtw_p2p_chk_state(wdinfo, state) _rtw_p2p_chk_state(wdinfo, state)
-#define rtw_p2p_chk_role(wdinfo, role) _rtw_p2p_chk_role(wdinfo, role)
-
-#define rtw_p2p_findphase_ex_set(wdinfo, value) \
- ((wdinfo)->find_phase_state_exchange_cnt = (value))
-
-/* is this find phase exchange for social channel scan? */
-#define rtw_p2p_findphase_ex_is_social(wdinfo) \
-((wdinfo)->find_phase_state_exchange_cnt >= P2P_FINDPHASE_EX_SOCIAL_FIRST)
-
-/* should we need find phase exchange anymore? */
-#define rtw_p2p_findphase_ex_is_needed(wdinfo) \
- ((wdinfo)->find_phase_state_exchange_cnt < P2P_FINDPHASE_EX_MAX && \
- (wdinfo)->find_phase_state_exchange_cnt != P2P_FINDPHASE_EX_NONE)
-
-#endif
diff --git a/drivers/staging/r8188eu/include/rtw_pwrctrl.h b/drivers/staging/r8188eu/include/rtw_pwrctrl.h
deleted file mode 100644
index 9f5cffd8bfb1..000000000000
--- a/drivers/staging/r8188eu/include/rtw_pwrctrl.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#ifndef __RTW_PWRCTRL_H_
-#define __RTW_PWRCTRL_H_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-#define XMIT_ALIVE BIT(0)
-#define RECV_ALIVE BIT(1)
-#define CMD_ALIVE BIT(2)
-#define EVT_ALIVE BIT(3)
-
-enum power_mgnt {
- PS_MODE_ACTIVE = 0,
- PS_MODE_MIN,
- PS_MODE_MAX,
- PS_MODE_DTIM,
- PS_MODE_VOIP,
- PS_MODE_UAPSD_WMM,
- PM_Card_Disable,
- PS_MODE_NUM
-};
-
-#define LPS_DELAY_TIME 1*HZ /* 1 sec */
-
-/* RF state. */
-enum rt_rf_power_state {
- rf_on, /* RF is on after RFSleep or RFOff */
- rf_sleep, /* 802.11 Power Save mode */
- rf_off, /* HW/SW Radio OFF or Inactive Power Save */
- /* Add the new RF state above this line===== */
- rf_max
-};
-
-enum { /* for ips_mode */
- IPS_NONE = 0,
- IPS_NORMAL,
- IPS_LEVEL_2,
-};
-
-struct pwrctrl_priv {
- struct mutex lock; /* Mutex used to protect struct pwrctrl_priv */
-
- u8 pwr_mode;
- u8 smart_ps;
- u8 bcn_ant_mode;
-
- bool bpower_saving;
-
- uint ips_enter_cnts;
- uint ips_leave_cnts;
-
- u8 ips_mode;
- u8 ips_mode_req; /* used to accept the mode setting request,
- * will update to ipsmode later */
- uint bips_processing;
- unsigned long ips_deny_time; /* will deny IPS when system time less than this */
- u8 ps_processing; /* temp used to mark whether in rtw_ps_processor */
-
- u8 bLeisurePs;
- u8 LpsIdleCount;
- u8 power_mgnt;
- u8 bFwCurrentInPSMode;
- u32 DelayLPSLastTimeStamp;
-
- u8 bInSuspend;
- u8 bSupportRemoteWakeup;
- struct timer_list pwr_state_check_timer;
- int pwr_state_check_interval;
-
- enum rt_rf_power_state rf_pwrstate;/* cur power state */
-
- u8 bkeepfwalive;
-};
-
-#define rtw_get_ips_mode_req(pwrctrlpriv) \
- (pwrctrlpriv)->ips_mode_req
-
-#define rtw_ips_mode_req(pwrctrlpriv, ips_mode) \
- ((pwrctrlpriv)->ips_mode_req = (ips_mode))
-
-#define RTW_PWR_STATE_CHK_INTERVAL 2000
-
-#define _rtw_set_pwr_state_check_timer(pwrctrlpriv, ms) \
- do { \
- _set_timer(&(pwrctrlpriv)->pwr_state_check_timer, (ms)); \
- } while (0)
-
-#define rtw_set_pwr_state_check_timer(pwrctrl) \
- _rtw_set_pwr_state_check_timer((pwrctrl), \
- (pwrctrl)->pwr_state_check_interval)
-
-void rtw_init_pwrctrl_priv(struct adapter *adapter);
-
-void rtw_set_firmware_ps_mode(struct adapter *adapter, u8 mode);
-void rtw_set_ps_mode(struct adapter *adapter, u8 ps_mode, u8 smart_ps,
- u8 bcn_ant_mode);
-void LeaveAllPowerSaveMode(struct adapter *adapter);
-
-void rtw_ps_processor(struct adapter *padapter);
-
-void LPS_Enter(struct adapter *adapter);
-void LPS_Leave(struct adapter *adapter);
-
-int rtw_pwr_wakeup(struct adapter *adapter);
-int rtw_pm_set_ips(struct adapter *adapter, u8 mode);
-int rtw_pm_set_lps(struct adapter *adapter, u8 mode);
-
-#endif /* __RTL871X_PWRCTRL_H_ */
diff --git a/drivers/staging/r8188eu/include/rtw_recv.h b/drivers/staging/r8188eu/include/rtw_recv.h
deleted file mode 100644
index 12026431a3d2..000000000000
--- a/drivers/staging/r8188eu/include/rtw_recv.h
+++ /dev/null
@@ -1,347 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#ifndef _RTW_RECV_H_
-#define _RTW_RECV_H_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-#define NR_RECVFRAME 256
-
-#define RXFRAME_ALIGN 8
-#define RXFRAME_ALIGN_SZ (1<<RXFRAME_ALIGN)
-
-#define MAX_RXFRAME_CNT 512
-#define MAX_RX_NUMBLKS (32)
-#define RECVFRAME_HDR_ALIGN 128
-
-#define SNAP_SIZE sizeof(struct ieee80211_snap_hdr)
-
-#define MAX_SUBFRAME_COUNT 64
-
-#define LLC_HEADER_SIZE 6
-
-/* for Rx reordering buffer control */
-struct recv_reorder_ctrl {
- struct adapter *padapter;
- u8 enable;
- u16 indicate_seq;/* wstart_b, init_value=0xffff */
- u16 wend_b;
- u8 wsize_b;
- struct __queue pending_recvframe_queue;
- struct timer_list reordering_ctrl_timer;
-};
-
-struct stainfo_rxcache {
- u16 tid_rxseq[16];
-/*
- unsigned short tid0_rxseq;
- unsigned short tid1_rxseq;
- unsigned short tid2_rxseq;
- unsigned short tid3_rxseq;
- unsigned short tid4_rxseq;
- unsigned short tid5_rxseq;
- unsigned short tid6_rxseq;
- unsigned short tid7_rxseq;
- unsigned short tid8_rxseq;
- unsigned short tid9_rxseq;
- unsigned short tid10_rxseq;
- unsigned short tid11_rxseq;
- unsigned short tid12_rxseq;
- unsigned short tid13_rxseq;
- unsigned short tid14_rxseq;
- unsigned short tid15_rxseq;
-*/
-};
-
-struct signal_stat {
- u8 update_req; /* used to indicate */
- u8 avg_val; /* avg of valid elements */
- u32 total_num; /* num of valid elements */
- u32 total_val; /* sum of valid elements */
-};
-#define MAX_PATH_NUM_92CS 3
-struct phy_info {
- u8 RxPWDBAll;
- u8 SignalQuality; /* in 0-100 index. */
- u8 RxMIMOSignalStrength[MAX_PATH_NUM_92CS];/* in 0~100 index */
- s8 RxPower; /* in dBm Translate from PWdB */
-/* Real power in dBm for this packet, no beautification and aggregation.
- * Keep this raw info to be used for the other procedures. */
- s8 recvpower;
- u8 SignalStrength; /* in 0-100 index. */
- u8 RxPwr[MAX_PATH_NUM_92CS];/* per-path's pwdb */
-};
-
-struct rx_pkt_attrib {
- u16 pkt_len;
- u8 physt;
- u8 drvinfo_sz;
- u8 shift_sz;
- u8 hdrlen; /* the WLAN Header Len */
- u8 amsdu;
- bool qos;
- u8 priority;
- u8 pw_save;
- u8 mdata;
- u16 seq_num;
- u8 frag_num;
- u8 mfrag;
- u8 order;
- u8 privacy; /* in frame_ctrl field */
- u8 bdecrypted;
-	u8 encrypt; /* 0 indicates no encryption; when non-zero,
-		     * it indicates the encryption algorithm */
- u8 iv_len;
- u8 icv_len;
- u8 crc_err;
- u8 icv_err;
-
- u16 eth_type;
-
- u8 dst[ETH_ALEN] __aligned(2);
- u8 src[ETH_ALEN] __aligned(2);
- u8 ta[ETH_ALEN] __aligned(2);
- u8 ra[ETH_ALEN] __aligned(2);
- u8 bssid[ETH_ALEN] __aligned(2);
-
- u8 ack_policy;
-
- u8 key_index;
-
- u8 mcs_rate;
- u8 rxht;
- u8 sgi;
- u8 pkt_rpt_type;
- u32 MacIDValidEntry[2]; /* 64 bits present 64 entry. */
-
- struct phy_info phy_info;
-};
-
-/* These definitions are used for Rx packet reordering. */
-#define SN_LESS(a, b) (((a - b) & 0x800) != 0)
-#define SN_EQUAL(a, b) (a == b)
-#define REORDER_WAIT_TIME (50) /* (ms) */
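/*
 * Worked example of the wrap-around comparison above (802.11 sequence
 * numbers are 12 bits, so the window wraps at 4096):
 *
 *   SN_LESS(4090, 2): (4090 - 2) & 0x800 = 0xFF8 & 0x800 != 0 -> true,
 *                     i.e. 4090 is treated as coming "before" 2.
 *   SN_LESS(2, 4090): (2 - 4090) & 0x800 = 0 -> false.
 */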
-
-#define RECVBUFF_ALIGN_SZ 8
-
-#define RXDESC_SIZE 24
-#define RXDESC_OFFSET RXDESC_SIZE
-
-struct recv_stat {
- __le32 rxdw0;
- __le32 rxdw1;
- __le32 rxdw2;
- __le32 rxdw3;
- __le32 rxdw4;
- __le32 rxdw5;
-};
-
-#define EOR BIT(30)
-
-/*
- * Accessors of recv_priv: rtw_recv_entry (dispatch/passive level),
- * recv_thread (passive), returnpkt (dispatch), halt (passive).
- *
- * Access is protected by entering a critical section.
- */
-struct recv_priv {
- spinlock_t lock;
- struct __queue free_recv_queue;
- struct __queue recv_pending_queue;
- struct __queue uc_swdec_pending_queue;
- u8 *pallocated_frame_buf;
- u8 *precv_frame_buf;
- uint free_recvframe_cnt;
- struct adapter *adapter;
- u32 bIsAnyNonBEPkts;
- u64 rx_bytes;
- u64 rx_pkts;
- u64 rx_drop;
- u64 last_rx_bytes;
-
- uint rx_icv_err;
- uint rx_largepacket_crcerr;
- uint rx_smallpacket_crcerr;
- uint rx_middlepacket_crcerr;
- u8 rx_pending_cnt;
-
- struct tasklet_struct recv_tasklet;
- struct sk_buff_head free_recv_skb_queue;
- struct sk_buff_head rx_skb_queue;
- u8 *pallocated_recv_buf;
- u8 *precv_buf; /* 4 alignment */
- struct __queue free_recv_buf_queue;
- u32 free_recv_buf_queue_cnt;
- /* For display the phy information */
- u8 is_signal_dbg; /* for debug */
- u8 signal_strength_dbg; /* for debug */
- s8 rssi;
- s8 rxpwdb;
- u8 signal_strength;
- u8 signal_qual;
- u8 noise;
- int RxSNRdB[2];
- s8 RxRssi[2];
- int FalseAlmCnt_all;
-
- struct timer_list signal_stat_timer;
- u32 signal_stat_sampling_interval;
- struct signal_stat signal_qual_data;
- struct signal_stat signal_strength_data;
-};
-
-#define rtw_set_signal_stat_timer(recvpriv) \
- _set_timer(&(recvpriv)->signal_stat_timer, \
- (recvpriv)->signal_stat_sampling_interval)
-
-struct sta_recv_priv {
- spinlock_t lock;
- int option;
- struct __queue defrag_q; /* keeping the fragment frame until defrag */
- struct stainfo_rxcache rxcache;
-};
-
-struct recv_buf {
- struct adapter *adapter;
- struct urb *purb;
- struct sk_buff *pskb;
- u8 reuse;
-};
-
-/*
- head ----->
-
- data ----->
-
- payload
-
- tail ----->
-
- end ----->
-
- len = (unsigned int )(tail - data);
-
-*/
-struct recv_frame {
- struct list_head list;
- struct sk_buff *pkt;
- struct adapter *adapter;
- u8 fragcnt;
- int frame_tag;
- struct rx_pkt_attrib attrib;
- uint len;
- u8 *rx_head;
- u8 *rx_data;
- u8 *rx_tail;
- u8 *rx_end;
- void *precvbuf;
- struct sta_info *psta;
- /* for A-MPDU Rx reordering buffer control */
- struct recv_reorder_ctrl *preorder_ctrl;
-};
-
-int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
-void _rtw_free_recv_priv(struct recv_priv *precvpriv);
-s32 rtw_recv_entry(struct recv_frame *precv_frame);
-struct recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue);
-struct recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue);
-int rtw_free_recvframe(struct recv_frame *precvframe,
- struct __queue *pfree_recv_queue);
-int _rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue);
-int rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue);
-void rtw_free_recvframe_queue(struct __queue *pframequeue,
- struct __queue *pfree_recv_queue);
-u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter);
-
-void rtw_reordering_ctrl_timeout_handler(void *pcontext);
-
-static inline u8 *get_rxmem(struct recv_frame *precvframe)
-{
- /* always return rx_head... */
- if (precvframe == NULL)
- return NULL;
- return precvframe->rx_head;
-}
-
-static inline u8 *recvframe_pull(struct recv_frame *precvframe, int sz)
-{
-	/* rx_data += sz; move rx_data forward by sz bytes. */
-
-	/* Strip sz bytes from the front: advance rx_data and return the
-	 * updated rx_data to the caller. */
-
- if (precvframe == NULL)
- return NULL;
- precvframe->rx_data += sz;
- if (precvframe->rx_data > precvframe->rx_tail) {
- precvframe->rx_data -= sz;
- return NULL;
- }
- precvframe->len -= sz;
- return precvframe->rx_data;
-}
-
-static inline u8 *recvframe_put(struct recv_frame *precvframe, int sz)
-{
-	/* Append sz bytes at rx_tail, advance rx_tail and return the
-	 * updated rx_tail to the caller. */
-	/* After putting, rx_tail must not go past rx_end. */
-
- if (precvframe == NULL)
- return NULL;
-
- precvframe->rx_tail += sz;
-
- if (precvframe->rx_tail > precvframe->rx_end) {
- precvframe->rx_tail -= sz;
- return NULL;
- }
- precvframe->len += sz;
- return precvframe->rx_tail;
-}
-
-static inline u8 *recvframe_pull_tail(struct recv_frame *precvframe, int sz)
-{
-	/* Remove data from the tail of the frame (by yitsen). */
-
-	/* Trim sz bytes from the tail: move rx_tail back and return the
-	 * updated rx_tail to the caller. */
-	/* After pulling, rx_tail must not drop below rx_data. */
-
- if (precvframe == NULL)
- return NULL;
- precvframe->rx_tail -= sz;
- if (precvframe->rx_tail < precvframe->rx_data) {
- precvframe->rx_tail += sz;
- return NULL;
- }
- precvframe->len -= sz;
- return precvframe->rx_tail;
-}
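/*
 * A minimal usage sketch for the helpers above, assuming a caller that
 * strips the 802.11 header and IV from the front of a received frame and
 * trims the ICV from the tail; strip_hdr_and_icv() is a hypothetical
 * helper, and the attrib fields are the ones declared above.
 */
#if 0	/* illustration only */
static inline void strip_hdr_and_icv(struct recv_frame *pframe)
{
	struct rx_pkt_attrib *pattrib = &pframe->attrib;

	/* Drop the WLAN header and IV from the front (returns NULL on underflow). */
	recvframe_pull(pframe, pattrib->hdrlen + pattrib->iv_len);

	/* Trim the ICV from the tail (returns NULL if it would cross rx_data). */
	recvframe_pull_tail(pframe, pattrib->icv_len);
}
#endif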
-
-static inline int get_recvframe_len(struct recv_frame *precvframe)
-{
- return precvframe->len;
-}
-
-static inline s32 translate_percentage_to_dbm(u32 sig_stren_index)
-{
- s32 power; /* in dBm. */
-
- /* Translate to dBm (x=0.5y-95). */
- power = (s32)((sig_stren_index + 1) >> 1);
- power -= 95;
-
- return power;
-}
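/*
 * Worked example for the conversion above (x = 0.5 * y - 95):
 * a signal strength index of 60 maps to ((60 + 1) >> 1) - 95 = -65 dBm,
 * and an index of 0 maps to -95 dBm.
 */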
-
-struct sta_info;
-
-void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv);
-
-void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame);
-
-#endif
diff --git a/drivers/staging/r8188eu/include/rtw_rf.h b/drivers/staging/r8188eu/include/rtw_rf.h
deleted file mode 100644
index b7267e75346c..000000000000
--- a/drivers/staging/r8188eu/include/rtw_rf.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTW_RF_H_
-#define __RTW_RF_H_
-
-#include "rtw_cmd.h"
-
-#define NumRates (13)
-
-/* slot time for 11g */
-#define SHORT_SLOT_TIME 9
-#define NON_SHORT_SLOT_TIME 20
-
-#define MAX_CHANNEL_NUM 14 /* 2.4 GHz only */
-
-#define NUM_REGULATORYS 1
-
-struct regulatory_class {
- u32 starting_freq; /* MHz, */
- u8 channel_set[MAX_CHANNEL_NUM];
- u8 channel_cck_power[MAX_CHANNEL_NUM]; /* dbm */
- u8 channel_ofdm_power[MAX_CHANNEL_NUM]; /* dbm */
- u8 txpower_limit; /* dbm */
- u8 channel_spacing; /* MHz */
- u8 modem;
-};
-
-enum capability {
- cESS = 0x0001,
- cIBSS = 0x0002,
- cPollable = 0x0004,
- cPollReq = 0x0008,
- cPrivacy = 0x0010,
- cShortPreamble = 0x0020,
- cPBCC = 0x0040,
- cChannelAgility = 0x0080,
- cSpectrumMgnt = 0x0100,
- cQos = 0x0200, /* For HCCA, use with CF-Pollable
- * and CF-PollReq */
- cShortSlotTime = 0x0400,
- cAPSD = 0x0800,
-	cRM = 0x1000, /* RRM (Radio Resource Measurement) */
- cDSSS_OFDM = 0x2000,
- cDelayedBA = 0x4000,
- cImmediateBA = 0x8000,
-};
-
-enum _REG_PREAMBLE_MODE {
- PREAMBLE_LONG = 1,
- PREAMBLE_AUTO = 2,
- PREAMBLE_SHORT = 3,
-};
-
-/* Bandwidth Offset */
-#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
-#define HAL_PRIME_CHNL_OFFSET_LOWER 1
-#define HAL_PRIME_CHNL_OFFSET_UPPER 2
-
-/* Represent Channel Width in HT Capabilities */
-/* */
-enum ht_channel_width {
- HT_CHANNEL_WIDTH_20 = 0,
- HT_CHANNEL_WIDTH_40 = 1,
-};
-
-/* */
-/* Represent Extension Channel Offset in HT Capabilities */
-/* This is available only in 40Mhz mode. */
-/* */
-enum ht_extchnl_offset {
- HT_EXTCHNL_OFFSET_NO_EXT = 0,
- HT_EXTCHNL_OFFSET_UPPER = 1,
- HT_EXTCHNL_OFFSET_NO_DEF = 2,
- HT_EXTCHNL_OFFSET_LOWER = 3,
-};
-
-u32 rtw_ch2freq(u32 ch);
-
-#endif /* _RTL8711_RF_H_ */
diff --git a/drivers/staging/r8188eu/include/rtw_security.h b/drivers/staging/r8188eu/include/rtw_security.h
deleted file mode 100644
index 783ae18a122a..000000000000
--- a/drivers/staging/r8188eu/include/rtw_security.h
+++ /dev/null
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTW_SECURITY_H_
-#define __RTW_SECURITY_H_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-#include <crypto/arc4.h>
-
-#define _NO_PRIVACY_ 0x0
-#define _WEP40_ 0x1
-#define _TKIP_ 0x2
-#define _TKIP_WTMIC_ 0x3
-#define _AES_ 0x4
-#define _WEP104_ 0x5
-#define _SMS4_ 0x06
-
-#define _WPA_IE_ID_ 0xdd
-#define _WPA2_IE_ID_ 0x30
-
-enum {
- ENCRYP_PROTOCOL_OPENSYS, /* open system */
- ENCRYP_PROTOCOL_WEP, /* WEP */
- ENCRYP_PROTOCOL_WPA, /* WPA */
- ENCRYP_PROTOCOL_WPA2, /* WPA2 */
-	ENCRYP_PROTOCOL_WAPI, /* WAPI: Not supported in this version */
- ENCRYP_PROTOCOL_MAX
-};
-
-#ifndef Ndis802_11AuthModeWPA2
-#define Ndis802_11AuthModeWPA2 (Ndis802_11AuthModeWPANone + 1)
-#endif
-
-#ifndef Ndis802_11AuthModeWPA2PSK
-#define Ndis802_11AuthModeWPA2PSK (Ndis802_11AuthModeWPANone + 2)
-#endif
-
-union pn48 {
- u64 val;
-
-#ifdef __LITTLE_ENDIAN
- struct {
- u8 TSC0;
- u8 TSC1;
- u8 TSC2;
- u8 TSC3;
- u8 TSC4;
- u8 TSC5;
- u8 TSC6;
- u8 TSC7;
- } _byte_;
-
-#elif defined(__BIG_ENDIAN)
-
- struct {
- u8 TSC7;
- u8 TSC6;
- u8 TSC5;
- u8 TSC4;
- u8 TSC3;
- u8 TSC2;
- u8 TSC1;
- u8 TSC0;
- } _byte_;
-#endif
-};
-
-union Keytype {
- u8 skey[16];
- u32 lkey[4];
-};
-
-struct rt_pmkid_list {
- u8 bUsed;
- u8 Bssid[6];
- u8 PMKID[16];
- u8 SsidBuf[33];
- u8 *ssid_octet;
- u16 ssid_length;
-};
-
-struct security_priv {
- u32 dot11AuthAlgrthm; /* 802.11 auth, could be open,
- * shared, 8021x and authswitch */
- u32 dot11PrivacyAlgrthm; /* This specify the privacy for
- * shared auth. algorithm. */
- /* WEP */
-	u32 dot11PrivacyKeyIndex; /* this is only valid for legacy
-				   * WEP, 0~3 for key id (tx key index) */
- union Keytype dot11DefKey[4]; /* this is only valid for def. key */
- u32 dot11DefKeylen[4];
- u32 dot118021XGrpPrivacy; /* This specify the privacy algthm.
- * used for Grp key */
- u32 dot118021XGrpKeyid; /* key id used for Grp Key
- * ( tx key index) */
- union Keytype dot118021XGrpKey[4]; /* 802.1x Group Key,
- * for inx0 and inx1 */
- union Keytype dot118021XGrptxmickey[4];
- union Keytype dot118021XGrprxmickey[4];
- union pn48 dot11Grptxpn; /* PN48 used for Grp Key xmit.*/
- union pn48 dot11Grprxpn; /* PN48 used for Grp Key recv.*/
-
- struct arc4_ctx xmit_arc4_ctx;
- struct arc4_ctx recv_arc4_ctx;
-
- /* extend security capabilities for AP_MODE */
- unsigned int dot8021xalg;/* 0:disable, 1:psk, 2:802.1x */
- unsigned int wpa_psk;/* 0:disable, bit(0): WPA, bit(1):WPA2 */
- unsigned int wpa_group_cipher;
- unsigned int wpa2_group_cipher;
- unsigned int wpa_pairwise_cipher;
- unsigned int wpa2_pairwise_cipher;
- u8 wps_ie[MAX_WPS_IE_LEN];/* added in assoc req */
- int wps_ie_len;
- u8 binstallGrpkey;
- u8 busetkipkey;
- u8 bcheck_grpkey;
- u8 bgrpkey_handshake;
- s32 sw_encrypt;/* from registry_priv */
- s32 sw_decrypt;/* from registry_priv */
-	s32 hw_decrypted;/* if hw_decrypted == false for a received packet,
-			  * it means the hw was not ready yet. */
-
- /* keeps the auth_type & enc_status from upper layer
- * ioctl(wpa_supplicant or wzc) */
- u32 ndisauthtype; /* NDIS_802_11_AUTHENTICATION_MODE */
- u32 ndisencryptstatus; /* NDIS_802_11_ENCRYPTION_STATUS */
- struct wlan_bssid_ex sec_bss; /* for joinbss (h2c buffer) usage */
- struct ndis_802_11_wep ndiswep;
- u8 assoc_info[600];
- u8 szofcapability[256]; /* for wpa2 usage */
- u8 oidassociation[512]; /* for wpa/wpa2 usage */
- u8 authenticator_ie[256]; /* store ap security information element */
- u8 supplicant_ie[256]; /* store sta security information element */
-
- /* for tkip countermeasure */
- u32 last_mic_err_time;
- u8 btkip_countermeasure;
- u8 btkip_wait_report;
- u32 btkip_countermeasure_time;
-
- /* */
- /* For WPA2 Pre-Authentication. */
- /* */
- struct rt_pmkid_list PMKIDList[NUM_PMKID_CACHE];
- u8 PMKIDIndex;
- u8 bWepDefaultKeyIdxSet;
-};
-
-#define GET_ENCRY_ALGO(psecuritypriv, psta, encry_algo, bmcst) \
-do { \
- switch (psecuritypriv->dot11AuthAlgrthm) { \
- case dot11AuthAlgrthm_Open: \
- case dot11AuthAlgrthm_Shared: \
- case dot11AuthAlgrthm_Auto: \
- encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm; \
- break; \
- case dot11AuthAlgrthm_8021X: \
- if (bmcst) \
- encry_algo = (u8)psecuritypriv->dot118021XGrpPrivacy;\
- else \
- encry_algo = (u8)psta->dot118021XPrivacy; \
- break; \
- case dot11AuthAlgrthm_WAPI: \
- encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm; \
- break; \
- } \
-} while (0)
-
-#define SET_ICE_IV_LEN(iv_len, icv_len, encrypt) \
-do { \
- switch (encrypt) { \
- case _WEP40_: \
- case _WEP104_: \
- iv_len = 4; \
- icv_len = 4; \
- break; \
- case _TKIP_: \
- iv_len = 8; \
- icv_len = 4; \
- break; \
- case _AES_: \
- iv_len = 8; \
- icv_len = 8; \
- break; \
- case _SMS4_: \
- iv_len = 18; \
- icv_len = 16; \
- break; \
- default: \
- iv_len = 0; \
- icv_len = 0; \
- break; \
- } \
-} while (0)
-
-#define GET_TKIP_PN(iv, dot11txpn) \
-do { \
- dot11txpn._byte_.TSC0 = iv[2]; \
- dot11txpn._byte_.TSC1 = iv[0]; \
- dot11txpn._byte_.TSC2 = iv[4]; \
- dot11txpn._byte_.TSC3 = iv[5]; \
- dot11txpn._byte_.TSC4 = iv[6]; \
- dot11txpn._byte_.TSC5 = iv[7]; \
-} while (0)
-
-#define ROL32(A, n) (((A) << (n)) | (((A)>>(32-(n))) & ((1UL << (n)) - 1)))
-#define ROR32(A, n) ROL32((A), 32-(n))
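/*
 * Worked example for the rotate macros above:
 * ROL32(0x80000001, 1) = 0x00000003 (the top bit wraps around to bit 0),
 * and ROR32(A, n) is just ROL32(A, 32 - n).
 */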
-
-struct mic_data {
- u32 K0, K1; /* Key */
- u32 L, R; /* Current state */
- u32 M; /* Message accumulator (single word) */
- u32 nBytesInM; /* # bytes in M */
-};
-
-void rtw_secmicsetkey(struct mic_data *pmicdata, u8 *key);
-void rtw_secmicappendbyte(struct mic_data *pmicdata, u8 b);
-void rtw_secmicappend(struct mic_data *pmicdata, u8 *src, u32 nBytes);
-void rtw_secgetmic(struct mic_data *pmicdata, u8 *dst);
-void rtw_seccalctkipmic(u8 *key, u8 *header, u8 *data, u32 data_len,
- u8 *Miccode, u8 priority);
-u32 rtw_aes_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe);
-u32 rtw_tkip_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe);
-void rtw_wep_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe);
-u32 rtw_aes_decrypt(struct adapter *padapter, struct recv_frame *precvframe);
-u32 rtw_tkip_decrypt(struct adapter *padapter, struct recv_frame *precvframe);
-void rtw_wep_decrypt(struct adapter *padapter, struct recv_frame *precvframe);
-
-#endif /* __RTL871X_SECURITY_H_ */
diff --git a/drivers/staging/r8188eu/include/rtw_xmit.h b/drivers/staging/r8188eu/include/rtw_xmit.h
deleted file mode 100644
index feeac85aedb0..000000000000
--- a/drivers/staging/r8188eu/include/rtw_xmit.h
+++ /dev/null
@@ -1,334 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef _RTW_XMIT_H_
-#define _RTW_XMIT_H_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-#define NR_XMITFRAME 256
-#define WMM_XMIT_THRESHOLD (NR_XMITFRAME * 2 / 5)
-
-#define MAX_XMITBUF_SZ (20480) /* 20k */
-#define NR_XMITBUFF (4)
-
-#define XMITBUF_ALIGN_SZ 4
-
-/* xmit extension buffer definition */
-#define MAX_XMIT_EXTBUF_SZ (1536)
-#define NR_XMIT_EXTBUFF (32)
-
-#define MAX_NUMBLKS (1)
-
-#define XMIT_VO_QUEUE (0)
-#define XMIT_VI_QUEUE (1)
-#define XMIT_BE_QUEUE (2)
-#define XMIT_BK_QUEUE (3)
-
-#define VO_QUEUE_INX 0
-#define VI_QUEUE_INX 1
-#define BE_QUEUE_INX 2
-#define BK_QUEUE_INX 3
-#define BCN_QUEUE_INX 4
-#define MGT_QUEUE_INX 5
-#define HIGH_QUEUE_INX 6
-#define TXCMD_QUEUE_INX 7
-
-#define HW_QUEUE_ENTRY 8
-
-#define WEP_IV(pattrib_iv, dot11txpn, keyidx)\
-do {\
- pattrib_iv[0] = dot11txpn._byte_.TSC0;\
- pattrib_iv[1] = dot11txpn._byte_.TSC1;\
- pattrib_iv[2] = dot11txpn._byte_.TSC2;\
- pattrib_iv[3] = ((keyidx & 0x3)<<6);\
- dot11txpn.val = (dot11txpn.val == 0xffffff) ? 0 : (dot11txpn.val+1);\
-} while (0)
-
-#define TKIP_IV(pattrib_iv, dot11txpn, keyidx)\
-do {\
- pattrib_iv[0] = dot11txpn._byte_.TSC1;\
- pattrib_iv[1] = (dot11txpn._byte_.TSC1 | 0x20) & 0x7f;\
- pattrib_iv[2] = dot11txpn._byte_.TSC0;\
- pattrib_iv[3] = BIT(5) | ((keyidx & 0x3)<<6);\
- pattrib_iv[4] = dot11txpn._byte_.TSC2;\
- pattrib_iv[5] = dot11txpn._byte_.TSC3;\
- pattrib_iv[6] = dot11txpn._byte_.TSC4;\
- pattrib_iv[7] = dot11txpn._byte_.TSC5;\
- dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0 : (dot11txpn.val+1);\
-} while (0)
-
-#define AES_IV(pattrib_iv, dot11txpn, keyidx)\
-do { \
- pattrib_iv[0] = dot11txpn._byte_.TSC0; \
- pattrib_iv[1] = dot11txpn._byte_.TSC1; \
- pattrib_iv[2] = 0; \
- pattrib_iv[3] = BIT(5) | ((keyidx & 0x3)<<6); \
- pattrib_iv[4] = dot11txpn._byte_.TSC2; \
- pattrib_iv[5] = dot11txpn._byte_.TSC3; \
- pattrib_iv[6] = dot11txpn._byte_.TSC4; \
- pattrib_iv[7] = dot11txpn._byte_.TSC5; \
- dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0 : (dot11txpn.val+1);\
-} while (0)
-
-#define HWXMIT_ENTRY 4
-
-#define TXDESC_SIZE 32
-
-#define PACKET_OFFSET_SZ (8)
-#define TXDESC_OFFSET (TXDESC_SIZE + PACKET_OFFSET_SZ)
-
-struct tx_desc {
- /* DWORD 0 */
- __le32 txdw0;
- __le32 txdw1;
- __le32 txdw2;
- __le32 txdw3;
- __le32 txdw4;
- __le32 txdw5;
- __le32 txdw6;
- __le32 txdw7;
-};
-
-union txdesc {
- struct tx_desc txdesc;
- unsigned int value[TXDESC_SIZE>>2];
-};
-
-struct hw_xmit {
- struct list_head *sta_list;
- int accnt;
-};
-
-/* reduce size */
-struct pkt_attrib {
- u8 type;
- u8 subtype;
- u8 bswenc;
- u8 dhcp_pkt;
- u16 ether_type;
- u16 seqnum;
- u16 pkt_hdrlen; /* the original 802.3 pkt header len */
- u16 hdrlen; /* the WLAN Header Len */
-	u32 pktlen; /* the original 802.3 pkt raw_data len (not including
-		     * ether_hdr data) */
- u32 last_txcmdsz;
- u8 nr_frags;
-	u8 encrypt; /* 0 indicates no encryption; when non-zero,
-		     * it indicates the encryption algorithm */
- u8 iv_len;
- u8 icv_len;
- u8 iv[18];
- u8 icv[16];
- u8 priority;
- u8 ack_policy;
- u8 mac_id;
- u8 vcs_mode; /* virtual carrier sense method */
- u8 dst[ETH_ALEN] __aligned(2);
- u8 src[ETH_ALEN] __aligned(2);
- u8 ta[ETH_ALEN] __aligned(2);
- u8 ra[ETH_ALEN] __aligned(2);
- u8 key_idx;
- u8 qos_en;
- u8 ht_en;
-	u8 raid;/* rate adaptive id */
- u8 bwmode;
- u8 ch_offset;/* PRIME_CHNL_OFFSET */
- u8 sgi;/* short GI */
- u8 ampdu_en;/* tx ampdu enable */
- u8 mdata;/* more data bit */
- u8 pctrl;/* per packet txdesc control enable */
- u8 triggered;/* for ap mode handling Power Saving sta */
- u8 qsel;
- u8 eosp;
- u8 rate;
- u8 intel_proxim;
- u8 retry_ctrl;
- struct sta_info *psta;
-};
-
-#define WLANHDR_OFFSET 64
-
-#define NULL_FRAMETAG (0x0)
-#define DATA_FRAMETAG 0x01
-#define MGNT_FRAMETAG 0x03
-
-#define TXAGG_FRAMETAG 0x08
-
-struct submit_ctx {
- u32 submit_time; /* */
- u32 timeout_ms; /* <0: not synchronous, 0: wait forever, >0: up to ms waiting */
- int status; /* status for operation */
- struct completion done;
-};
-
-enum {
- RTW_SCTX_SUBMITTED = -1,
- RTW_SCTX_DONE_SUCCESS = 0,
- RTW_SCTX_DONE_UNKNOWN,
- RTW_SCTX_DONE_TIMEOUT,
- RTW_SCTX_DONE_BUF_ALLOC,
- RTW_SCTX_DONE_BUF_FREE,
- RTW_SCTX_DONE_WRITE_PORT_ERR,
- RTW_SCTX_DONE_TX_DESC_NA,
- RTW_SCTX_DONE_TX_DENY,
- RTW_SCTX_DONE_CCX_PKT_FAIL,
- RTW_SCTX_DONE_DRV_STOP,
- RTW_SCTX_DONE_DEV_REMOVE,
-};
-
-void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms);
-int rtw_sctx_wait(struct submit_ctx *sctx);
-void rtw_sctx_done_err(struct submit_ctx **sctx, int status);
-
-struct xmit_buf {
- struct list_head list;
- struct adapter *padapter;
- u8 *pallocated_buf;
- u8 *pbuf;
- void *priv_data;
- u16 ext_tag; /* 0: Normal xmitbuf, 1: extension xmitbuf. */
- bool high_queue;
- u32 alloc_sz;
- u32 len;
- struct submit_ctx *sctx;
- struct urb *pxmit_urb;
- int last[8];
-};
-
-struct xmit_frame {
- struct list_head list;
- struct pkt_attrib attrib;
- struct sk_buff *pkt;
- int frame_tag;
- struct adapter *padapter;
- u8 *buf_addr;
- struct xmit_buf *pxmitbuf;
-
- u8 agg_num;
- s8 pkt_offset;
- u8 ack_report;
-};
-
-struct tx_servq {
- struct list_head tx_pending;
- struct list_head sta_pending;
- int qcnt;
-};
-
-struct sta_xmit_priv {
- spinlock_t lock;
- struct tx_servq be_q; /* priority == 0,3 */
- struct tx_servq bk_q; /* priority == 1,2 */
- struct tx_servq vi_q; /* priority == 4,5 */
- struct tx_servq vo_q; /* priority == 6,7 */
- u16 txseq_tid[16];
-};
-
-struct hw_txqueue {
- volatile int head;
- volatile int tail;
- volatile int free_sz; /* in units of 64 bytes */
- volatile int free_cmdsz;
- volatile int txsz[8];
- uint ff_hwaddr;
- uint cmd_hwaddr;
- int ac_tag;
-};
-
-struct xmit_priv {
- spinlock_t lock;
- struct list_head be_pending;
- struct list_head bk_pending;
- struct list_head vi_pending;
- struct list_head vo_pending;
- u8 *pallocated_frame_buf;
- u8 *pxmit_frame_buf;
- uint free_xmitframe_cnt;
- struct __queue free_xmit_queue;
- uint frag_len;
- struct adapter *adapter;
- u64 tx_bytes;
- u64 tx_pkts;
- u64 tx_drop;
- u64 last_tx_bytes;
- u64 last_tx_pkts;
- struct hw_xmit *hwxmits;
- u8 wmm_para_seq[4];/* sequence for wmm ac parameter strength
-	 * from large to small. its value is 0->vo,
- * 1->vi, 2->be, 3->bk. */
- struct tasklet_struct xmit_tasklet;
- struct __queue free_xmitbuf_queue;
- struct __queue pending_xmitbuf_queue;
- u8 *pallocated_xmitbuf;
- u8 *pxmitbuf;
- uint free_xmitbuf_cnt;
- struct __queue free_xmit_extbuf_queue;
- u8 *pallocated_xmit_extbuf;
- u8 *pxmit_extbuf;
- uint free_xmit_extbuf_cnt;
- u16 nqos_ssn;
- int ack_tx;
- struct mutex ack_tx_mutex;
- struct submit_ctx ack_tx_ops;
-};
-
-struct pkt_file {
- struct sk_buff *pkt;
- size_t pkt_len; /* the remainder length of the open_file */
- unsigned char *cur_buffer;
- u8 *buf_start;
- u8 *cur_addr;
- size_t buf_len;
-};
-
-struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv);
-s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv,
- struct xmit_buf *pxmitbuf);
-struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv);
-s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv,
- struct xmit_buf *pxmitbuf);
-void rtw_count_tx_stats(struct adapter *padapter,
- struct xmit_frame *pxmitframe, int sz);
-s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr,
- struct pkt_attrib *pattrib);
-s32 rtw_put_snap(u8 *data, u16 h_proto);
-
-struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv);
-s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv,
- struct xmit_frame *pxmitframe);
-void rtw_free_xmitframe_list(struct xmit_priv *pxmitpriv, struct list_head *xframe_list);
-struct tx_servq *rtw_get_sta_pending(struct adapter *padapter,
- struct sta_info *psta, int up, u8 *ac);
-struct xmit_frame *rtw_dequeue_xframe(struct xmit_priv *pxmitpriv,
- struct hw_xmit *phwxmit_i);
-
-s32 rtw_xmit_classifier(struct adapter *padapter,
- struct xmit_frame *pxmitframe);
-s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt,
- struct xmit_frame *pxmitframe);
-s32 _rtw_init_hw_txqueue(struct hw_txqueue *phw_txqueue, u8 ac_tag);
-void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv);
-s32 rtw_txframes_pending(struct adapter *padapter);
-s32 rtw_txframes_sta_ac_pending(struct adapter *padapter,
- struct pkt_attrib *pattrib);
-int _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
-void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);
-int rtw_alloc_hwxmits(struct adapter *padapter);
-s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
-
-int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_frame *pxmitframe);
-void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta);
-void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta);
-void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *psta);
-
-u8 qos_acm(u8 acm_mask, u8 priority);
-u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe);
-int rtw_ack_tx_wait(struct xmit_priv *pxmitpriv, u32 timeout_ms);
-void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status);
-
-void rtw_xmit_complete(struct adapter *padapter, struct xmit_frame *pxframe);
-netdev_tx_t rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev);
-
-#endif /* _RTL871X_XMIT_H_ */
diff --git a/drivers/staging/r8188eu/include/sta_info.h b/drivers/staging/r8188eu/include/sta_info.h
deleted file mode 100644
index e42f4b4c6e24..000000000000
--- a/drivers/staging/r8188eu/include/sta_info.h
+++ /dev/null
@@ -1,313 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __STA_INFO_H_
-#define __STA_INFO_H_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-#include "wifi.h"
-
-#define IBSS_START_MAC_ID 2
-#define NUM_STA 32
-#define NUM_ACL 16
-
-/* if mode ==0, then the sta is allowed once the addr is hit. */
-/* if mode ==1, then the sta is rejected once the addr is non-hit. */
-struct rtw_wlan_acl_node {
- struct list_head list;
- u8 addr[ETH_ALEN];
- u8 valid;
-};
-
-/* mode=0, disable */
-/* mode=1, accept unless in deny list */
-/* mode=2, deny unless in accept list */
-struct wlan_acl_pool {
- int mode;
- int num;
- struct rtw_wlan_acl_node aclnode[NUM_ACL];
- struct __queue acl_node_q;
-};
-
-struct rssi_sta {
- s32 UndecoratedSmoothedPWDB;
- s32 UndecoratedSmoothedCCK;
- s32 UndecoratedSmoothedOFDM;
- u64 PacketMap;
- u8 ValidBit;
-};
-
-struct stainfo_stats {
- u64 rx_mgnt_pkts;
- u64 rx_beacon_pkts;
- u64 rx_probereq_pkts;
- u64 rx_probersp_pkts;
- u64 rx_probersp_bm_pkts;
- u64 rx_probersp_uo_pkts;
- u64 rx_ctrl_pkts;
- u64 rx_data_pkts;
-
- u64 last_rx_beacon_pkts;
- u64 last_rx_probereq_pkts;
- u64 last_rx_probersp_pkts;
- u64 last_rx_probersp_bm_pkts;
- u64 last_rx_probersp_uo_pkts;
- u64 last_rx_ctrl_pkts;
- u64 last_rx_data_pkts;
- u64 rx_bytes;
- u64 rx_drops;
- u64 tx_pkts;
- u64 tx_bytes;
- u64 tx_drops;
-};
-
-struct sta_info {
- spinlock_t lock;
- struct list_head list; /* free_sta_queue */
- struct list_head hash_list; /* sta_hash */
-
- struct sta_xmit_priv sta_xmitpriv;
- struct sta_recv_priv sta_recvpriv;
-
- struct __queue sleep_q;
- unsigned int sleepq_len;
-
- uint state;
- uint aid;
- uint mac_id;
- uint qos_option;
- u8 hwaddr[ETH_ALEN];
-
- uint ieee8021x_blocked; /* 0: allowed, 1:blocked */
- uint dot118021XPrivacy; /* aes, tkip... */
- union Keytype dot11tkiptxmickey;
- union Keytype dot11tkiprxmickey;
- union Keytype dot118021x_UncstKey;
- union pn48 dot11txpn; /* PN48 used for Unicast xmit. */
- union pn48 dot11rxpn; /* PN48 used for Unicast recv. */
- u8 bssrateset[16];
- u32 bssratelen;
- s32 rssi;
- s32 signal_quality;
-
- u8 cts2self;
- u8 rtsen;
-
- u8 raid;
- u8 init_rate;
- u32 ra_mask;
- u8 wireless_mode; /* NETWORK_TYPE */
- struct stainfo_stats sta_stats;
-
- /* for A-MPDU TX, ADDBA timeout check */
- struct timer_list addba_retry_timer;
-
- /* for A-MPDU Rx reordering buffer control */
- struct recv_reorder_ctrl recvreorder_ctrl[16];
-
- /* for A-MPDU Tx */
- /* unsigned char ampdu_txen_bitmap; */
- u16 BA_starting_seqctrl[16];
-
- struct ht_priv htpriv;
-
- /* Notes: */
- /* STA_Mode: */
- /* curr_network(mlme_priv/security_priv/qos/ht) +
- * sta_info: (STA & AP) CAP/INFO */
- /* scan_q: AP CAP/INFO */
-
- /* AP_Mode: */
- /* curr_network(mlme_priv/security_priv/qos/ht) : AP CAP/INFO */
- /* sta_info: (AP & STA) CAP/INFO */
-
- struct list_head asoc_list;
- struct list_head auth_list;
-
- unsigned int expire_to;
- unsigned int auth_seq;
- unsigned int authalg;
- unsigned char chg_txt[128];
-
- u16 capability;
- int flags;
-
- int dot8021xalg;/* 0:disable, 1:psk, 2:802.1x */
- int wpa_psk;/* 0:disable, bit(0): WPA, bit(1):WPA2 */
- int wpa_group_cipher;
- int wpa2_group_cipher;
- int wpa_pairwise_cipher;
- int wpa2_pairwise_cipher;
-
- u8 bpairwise_key_installed;
-
- u8 wpa_ie[32];
-
- u8 nonerp_set;
- u8 no_short_slot_time_set;
- u8 no_short_preamble_set;
- u8 no_ht_gf_set;
- u8 no_ht_set;
- u8 ht_20mhz_set;
-
- unsigned int tx_ra_bitmap;
- u8 qos_info;
-
- u8 max_sp_len;
- u8 uapsd_bk;/* BIT(0): Delivery enabled, BIT(1): Trigger enabled */
- u8 uapsd_be;
- u8 uapsd_vi;
- u8 uapsd_vo;
-
- u8 has_legacy_ac;
- unsigned int sleepq_ac_len;
-
- /* p2p priv data */
- u8 is_p2p_device;
- u8 p2p_status_code;
-
- /* p2p client info */
- u8 dev_addr[ETH_ALEN];
- u8 dev_cap;
- u16 config_methods;
- u8 primary_dev_type[8];
- u8 num_of_secdev_type;
- u8 secdev_types_list[32];/* 32/8 == 4; */
- u16 dev_name_len;
- u8 dev_name[32];
- u8 under_exist_checking;
- u8 keep_alive_trycnt;
-
- /* for DM */
- struct rssi_sta rssi_stat;
-
- /* ================ODM Relative Info======================= */
- /* Please be careful, don't declare too much structure here.
-	 * It costs memory multiplied by the supported STA count. */
- /* 2011/10/20 MH Add for ODM STA info. */
- /* Driver Write */
- u8 bValid; /* record the sta status link or not? */
- u8 IOTPeer; /* Enum value. HT_IOT_PEER_E */
- u8 rssi_level; /* for Refresh RA mask */
- /* ODM Write */
- /* 1 PHY_STATUS_INFO */
- u8 RSSI_Path[4]; /* */
- u8 RSSI_Ave;
- u8 RXEVM[4];
- u8 RXSNR[4];
-
- /* ================ODM Relative Info======================= */
- /* */
-
- /* To store the sequence number of received management frame */
- u16 RxMgmtFrameSeqNum;
-};
-
-#define sta_rx_pkts(sta) \
- (sta->sta_stats.rx_mgnt_pkts \
- + sta->sta_stats.rx_ctrl_pkts \
- + sta->sta_stats.rx_data_pkts)
-
-#define sta_rx_data_pkts(sta) \
- (sta->sta_stats.rx_data_pkts)
-
-#define sta_last_rx_data_pkts(sta) \
- (sta->sta_stats.last_rx_data_pkts)
-
-#define sta_rx_beacon_pkts(sta) \
- (sta->sta_stats.rx_beacon_pkts)
-
-#define sta_last_rx_beacon_pkts(sta) \
- (sta->sta_stats.last_rx_beacon_pkts)
-
-#define sta_rx_probersp_pkts(sta) \
- (sta->sta_stats.rx_probersp_pkts)
-
-#define sta_last_rx_probersp_pkts(sta) \
- (sta->sta_stats.last_rx_probersp_pkts)
-
-#define sta_update_last_rx_pkts(sta) \
-do { \
- sta->sta_stats.last_rx_beacon_pkts = sta->sta_stats.rx_beacon_pkts; \
- sta->sta_stats.last_rx_probereq_pkts = sta->sta_stats.rx_probereq_pkts; \
- sta->sta_stats.last_rx_probersp_pkts = sta->sta_stats.rx_probersp_pkts; \
- sta->sta_stats.last_rx_probersp_bm_pkts = sta->sta_stats.rx_probersp_bm_pkts; \
- sta->sta_stats.last_rx_probersp_uo_pkts = sta->sta_stats.rx_probersp_uo_pkts; \
- sta->sta_stats.last_rx_ctrl_pkts = sta->sta_stats.rx_ctrl_pkts; \
- sta->sta_stats.last_rx_data_pkts = sta->sta_stats.rx_data_pkts; \
-} while (0)
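The rx/tx counters plus their last_* snapshots let the driver measure activity over an interval: subtract the snapshot from the live counter, then refresh the snapshot with the macro above. A hedged sketch of that pattern (the helper name below is illustrative, not taken from the driver):

/* Illustrative only: how many data frames this station received since the
 * previous call, built from the accessors and macro defined above. */
static u64 sta_rx_data_pkts_since_last(struct sta_info *sta)
{
	u64 delta = sta_rx_data_pkts(sta) - sta_last_rx_data_pkts(sta);

	sta_update_last_rx_pkts(sta);	/* refresh every last_* snapshot */
	return delta;
}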
-
-struct sta_priv {
- u8 *pallocated_stainfo_buf;
- u8 *pstainfo_buf;
- struct __queue free_sta_queue;
-
- spinlock_t sta_hash_lock;
- struct list_head sta_hash[NUM_STA];
- int asoc_sta_count;
- struct __queue sleep_q;
- struct __queue wakeup_q;
-
- struct adapter *padapter;
-
- spinlock_t asoc_list_lock;
- struct list_head asoc_list;
-
- struct list_head auth_list;
- spinlock_t auth_list_lock;
- u8 asoc_list_cnt;
- u8 auth_list_cnt;
-
- unsigned int auth_to; /* sec, time to expire in authenticating. */
- unsigned int assoc_to; /* sec, time to expire before associating. */
- unsigned int expire_to; /* sec , time to expire after associated. */
-
- /* pointers to STA info; based on allocated AID or NULL if AID free
-	 * AID is in the range 1-2007, so sta_aid[0] corresponds to AID 1
- * and so on
- */
- struct sta_info *sta_aid[NUM_STA];
-
- u16 sta_dz_bitmap;/* only support 15 stations, station aid bitmap
- * for sleeping sta. */
- u16 tim_bitmap; /* only support 15 stations, aid=0~15 mapping
- * bit0~bit15 */
-
- u16 max_num_sta;
-
- struct wlan_acl_pool acl_list;
-};
-
-static inline u32 wifi_mac_hash(u8 *mac)
-{
- u32 x;
-
- x = mac[0];
- x = (x << 2) ^ mac[1];
- x = (x << 2) ^ mac[2];
- x = (x << 2) ^ mac[3];
- x = (x << 2) ^ mac[4];
- x = (x << 2) ^ mac[5];
-
- x ^= x >> 8;
- x = x & (NUM_STA - 1);
- return x;
-}
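wifi_mac_hash() folds the six address octets together with shift-and-XOR steps, mixes the high byte back in, and masks the result down to a bucket index in [0, NUM_STA), which indexes sta_hash[] in struct sta_priv above. A standalone check of the same arithmetic on an arbitrary sample address:

#include <stdint.h>
#include <stdio.h>

#define NUM_STA 32

/* Same arithmetic as wifi_mac_hash() above, on plain uint8_t. */
static uint32_t mac_hash(const uint8_t *mac)
{
	uint32_t x = mac[0];

	x = (x << 2) ^ mac[1];
	x = (x << 2) ^ mac[2];
	x = (x << 2) ^ mac[3];
	x = (x << 2) ^ mac[4];
	x = (x << 2) ^ mac[5];
	x ^= x >> 8;
	return x & (NUM_STA - 1);	/* bucket index into sta_hash[] */
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0xe0, 0x4c, 0x81, 0x88, 0xee };

	printf("bucket %u of %d\n", mac_hash(mac), NUM_STA);
	return 0;
}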
-
-int _rtw_init_sta_priv(struct sta_priv *pstapriv);
-void _rtw_free_sta_priv(struct sta_priv *pstapriv);
-
-#define stainfo_offset_valid(offset) (offset < NUM_STA && offset >= 0)
-int rtw_stainfo_offset(struct sta_priv *stapriv, struct sta_info *sta);
-struct sta_info *rtw_get_stainfo_by_offset(struct sta_priv *stapriv, int off);
-
-struct sta_info *rtw_alloc_stainfo(struct sta_priv *stapriv, u8 *hwaddr);
-void rtw_free_stainfo(struct adapter *adapt, struct sta_info *psta);
-void rtw_free_all_stainfo(struct adapter *adapt);
-struct sta_info *rtw_get_stainfo(struct sta_priv *stapriv, u8 *hwaddr);
-u32 rtw_init_bcmc_stainfo(struct adapter *adapt);
-struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter);
-u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr);
-
-#endif /* _STA_INFO_H_ */
diff --git a/drivers/staging/r8188eu/include/usb_ops.h b/drivers/staging/r8188eu/include/usb_ops.h
deleted file mode 100644
index 5bd8ce37aebf..000000000000
--- a/drivers/staging/r8188eu/include/usb_ops.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __USB_OPS_H_
-#define __USB_OPS_H_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-#include "osdep_intf.h"
-
-#define REALTEK_USB_VENQT_READ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
-#define REALTEK_USB_VENQT_WRITE (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
-#define REALTEK_USB_VENQT_CMD_REQ 0x05
-#define REALTEK_USB_VENQT_CMD_IDX 0x00
-
-#define ALIGNMENT_UNIT 16
-#define MAX_VENDOR_REQ_CMD_SIZE 254 /* 8188cu SIE Support */
-#define MAX_USB_IO_CTL_SIZE (MAX_VENDOR_REQ_CMD_SIZE + ALIGNMENT_UNIT)
-
-/*
- * Increase the continual_urb_error of this @param dvobj and check
- * whether it is larger than MAX_CONTINUAL_URB_ERR
- * @return true: the error count has exceeded MAX_CONTINUAL_URB_ERR
- * @return false: the error count is still within the limit
- */
-static inline bool rtw_inc_and_chk_continual_urb_error(struct dvobj_priv *dvobj)
-{
- int value = atomic_inc_return(&dvobj->continual_urb_error);
-
- if (value > MAX_CONTINUAL_URB_ERR)
- return true;
-
- return false;
-}
-
-/*
-* Set the continual_urb_error of this @param dvobj to 0
-*/
-static inline void rtw_reset_continual_urb_error(struct dvobj_priv *dvobj)
-{
- atomic_set(&dvobj->continual_urb_error, 0);
-}
-
-#define USB_HIGH_SPEED_BULK_SIZE 512
-#define USB_FULL_SPEED_BULK_SIZE 64
-
-static inline bool rtw_usb_bulk_size_boundary(struct adapter *padapter, int buf_len)
-{
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
-
- if (pdvobjpriv->pusbdev->speed == USB_SPEED_HIGH)
- return buf_len % USB_HIGH_SPEED_BULK_SIZE == 0;
- else
- return buf_len % USB_FULL_SPEED_BULK_SIZE == 0;
-}
-
-#endif /* __USB_OPS_H_ */
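rtw_usb_bulk_size_boundary() reports whether a transfer length is an exact multiple of the bulk endpoint's max packet size (512 bytes at high speed, 64 at full speed). Transfers that end exactly on that boundary conventionally need a zero-length packet or a pad byte so the device can tell where the transfer ends; that rationale is the usual USB convention rather than something stated in this header. A standalone version of the same check:

#include <stdbool.h>
#include <stdio.h>

#define USB_HIGH_SPEED_BULK_SIZE 512
#define USB_FULL_SPEED_BULK_SIZE  64

/* Mirrors rtw_usb_bulk_size_boundary(): true when buf_len lands exactly
 * on a max-packet boundary for the given bus speed. */
static bool bulk_size_boundary(int buf_len, bool high_speed)
{
	int mps = high_speed ? USB_HIGH_SPEED_BULK_SIZE
			     : USB_FULL_SPEED_BULK_SIZE;

	return buf_len % mps == 0;
}

int main(void)
{
	printf("1024 @ high speed: %d\n", bulk_size_boundary(1024, true)); /* 1 */
	printf("1000 @ high speed: %d\n", bulk_size_boundary(1000, true)); /* 0 */
	return 0;
}
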
diff --git a/drivers/staging/r8188eu/include/usb_osintf.h b/drivers/staging/r8188eu/include/usb_osintf.h
deleted file mode 100644
index f271e93e9ab9..000000000000
--- a/drivers/staging/r8188eu/include/usb_osintf.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __USB_OSINTF_H
-#define __USB_OSINTF_H
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-extern char *rtw_initmac;
-extern int rtw_mc2u_disable;
-
-#define USBD_HALTED(Status) ((u32)(Status) >> 30 == 3)
-
-void netdev_br_init(struct net_device *netdev);
-void dhcp_flag_bcast(struct adapter *priv, struct sk_buff *skb);
-void *scdb_findEntry(struct adapter *priv, unsigned char *ipAddr);
-void nat25_db_expire(struct adapter *priv);
-int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method);
-
-#endif
diff --git a/drivers/staging/r8188eu/include/wifi.h b/drivers/staging/r8188eu/include/wifi.h
deleted file mode 100644
index 254a4bc1a141..000000000000
--- a/drivers/staging/r8188eu/include/wifi.h
+++ /dev/null
@@ -1,773 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#ifndef _WIFI_H_
-#define _WIFI_H_
-
-#include <linux/bits.h>
-#include <linux/ieee80211.h>
-
-#define WLAN_ETHHDR_LEN 14
-#define WLAN_HDR_A3_LEN 24
-#define WLAN_HDR_A3_QOS_LEN 26
-#define WLAN_SSID_MAXLEN 32
-
-enum WIFI_FRAME_SUBTYPE {
- /* below is for mgt frame */
- WIFI_ASSOCREQ = (0 | IEEE80211_FTYPE_MGMT),
- WIFI_ASSOCRSP = (BIT(4) | IEEE80211_FTYPE_MGMT),
- WIFI_REASSOCREQ = (BIT(5) | IEEE80211_FTYPE_MGMT),
- WIFI_REASSOCRSP = (BIT(5) | BIT(4) | IEEE80211_FTYPE_MGMT),
- WIFI_PROBEREQ = (BIT(6) | IEEE80211_FTYPE_MGMT),
- WIFI_PROBERSP = (BIT(6) | BIT(4) | IEEE80211_FTYPE_MGMT),
- WIFI_BEACON = (BIT(7) | IEEE80211_FTYPE_MGMT),
- WIFI_ATIM = (BIT(7) | BIT(4) | IEEE80211_FTYPE_MGMT),
- WIFI_DISASSOC = (BIT(7) | BIT(5) | IEEE80211_FTYPE_MGMT),
- WIFI_AUTH = (BIT(7) | BIT(5) | BIT(4) | IEEE80211_FTYPE_MGMT),
- WIFI_DEAUTH = (BIT(7) | BIT(6) | IEEE80211_FTYPE_MGMT),
- WIFI_ACTION = (BIT(7) | BIT(6) | BIT(4) | IEEE80211_FTYPE_MGMT),
-
- /* below is for control frame */
- WIFI_PSPOLL = (BIT(7) | BIT(5) | IEEE80211_FTYPE_CTL),
-
- /* below is for data frame */
- WIFI_DATA = (0 | IEEE80211_FTYPE_DATA),
- WIFI_DATA_CFACK = (BIT(4) | IEEE80211_FTYPE_DATA),
- WIFI_DATA_CFPOLL = (BIT(5) | IEEE80211_FTYPE_DATA),
- WIFI_DATA_CFACKPOLL = (BIT(5) | BIT(4) | IEEE80211_FTYPE_DATA),
- WIFI_DATA_NULL = (BIT(6) | IEEE80211_FTYPE_DATA),
- WIFI_QOS_DATA_NULL = (BIT(6) | IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA),
-};
-
-enum WIFI_REASON_CODE {
- _RSON_RESERVED_ = 0,
- _RSON_UNSPECIFIED_ = 1,
- _RSON_AUTH_NO_LONGER_VALID_ = 2,
- _RSON_DEAUTH_STA_LEAVING_ = 3,
- _RSON_INACTIVITY_ = 4,
- _RSON_UNABLE_HANDLE_ = 5,
- _RSON_CLS2_ = 6,
- _RSON_CLS3_ = 7,
- _RSON_DISAOC_STA_LEAVING_ = 8,
- _RSON_ASOC_NOT_AUTH_ = 9,
-
- /* WPA reason */
- _RSON_INVALID_IE_ = 13,
- _RSON_MIC_FAILURE_ = 14,
- _RSON_4WAY_HNDSHK_TIMEOUT_ = 15,
- _RSON_GROUP_KEY_UPDATE_TIMEOUT_ = 16,
- _RSON_DIFF_IE_ = 17,
- _RSON_MLTCST_CIPHER_NOT_VALID_ = 18,
- _RSON_UNICST_CIPHER_NOT_VALID_ = 19,
- _RSON_AKMP_NOT_VALID_ = 20,
- _RSON_UNSUPPORT_RSNE_VER_ = 21,
- _RSON_INVALID_RSNE_CAP_ = 22,
- _RSON_IEEE_802DOT1X_AUTH_FAIL_ = 23,
-
-	/* below are Realtek definitions */
- _RSON_PMK_NOT_AVAILABLE_ = 24,
- _RSON_TDLS_TEAR_TOOFAR_ = 25,
- _RSON_TDLS_TEAR_UN_RSN_ = 26,
-};
-
-enum WIFI_STATUS_CODE {
- _STATS_SUCCESSFUL_ = 0,
- _STATS_FAILURE_ = 1,
- _STATS_CAP_FAIL_ = 10,
- _STATS_NO_ASOC_ = 11,
- _STATS_OTHER_ = 12,
- _STATS_NO_SUPP_ALG_ = 13,
- _STATS_OUT_OF_AUTH_SEQ_ = 14,
- _STATS_CHALLENGE_FAIL_ = 15,
- _STATS_AUTH_TIMEOUT_ = 16,
- _STATS_UNABLE_HANDLE_STA_ = 17,
- _STATS_RATE_FAIL_ = 18,
-};
-
-/* extended */
-/* IEEE 802.11b */
-#define WLAN_STATUS_ASSOC_DENIED_NOSHORT 19
-#define WLAN_STATUS_ASSOC_DENIED_NOPBCC 20
-#define WLAN_STATUS_ASSOC_DENIED_NOAGILITY 21
-/* IEEE 802.11h */
-#define WLAN_STATUS_SPEC_MGMT_REQUIRED 22
-#define WLAN_STATUS_PWR_CAPABILITY_NOT_VALID 23
-#define WLAN_STATUS_SUPPORTED_CHANNEL_NOT_VALID 24
-/* IEEE 802.11g */
-#define WLAN_STATUS_ASSOC_DENIED_NO_SHORT_SLOT_TIME 25
-#define WLAN_STATUS_ASSOC_DENIED_NO_ER_PBCC 26
-#define WLAN_STATUS_ASSOC_DENIED_NO_DSSS_OFDM 27
-/* IEEE 802.11w */
-#define WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY 30
-#define WLAN_STATUS_ROBUST_MGMT_FRAME_POLICY_VIOLATION 31
-/* IEEE 802.11i */
-#define WLAN_STATUS_INVALID_IE 40
-#define WLAN_STATUS_GROUP_CIPHER_NOT_VALID 41
-#define WLAN_STATUS_PAIRWISE_CIPHER_NOT_VALID 42
-#define WLAN_STATUS_AKMP_NOT_VALID 43
-#define WLAN_STATUS_UNSUPPORTED_RSN_IE_VERSION 44
-#define WLAN_STATUS_INVALID_RSN_IE_CAPAB 45
-#define WLAN_STATUS_CIPHER_REJECTED_PER_POLICY 46
-#define WLAN_STATUS_TS_NOT_CREATED 47
-#define WLAN_STATUS_DIRECT_LINK_NOT_ALLOWED 48
-#define WLAN_STATUS_DEST_STA_NOT_PRESENT 49
-#define WLAN_STATUS_DEST_STA_NOT_QOS_STA 50
-#define WLAN_STATUS_ASSOC_DENIED_LISTEN_INT_TOO_LARGE 51
-/* IEEE 802.11r */
-#define WLAN_STATUS_INVALID_FT_ACTION_FRAME_COUNT 52
-#define WLAN_STATUS_INVALID_PMKID 53
-#define WLAN_STATUS_INVALID_MDIE 54
-#define WLAN_STATUS_INVALID_FTIE 55
-
-enum WIFI_REG_DOMAIN {
- DOMAIN_FCC = 1,
- DOMAIN_IC = 2,
- DOMAIN_ETSI = 3,
- DOMAIN_SPA = 4,
- DOMAIN_FRANCE = 5,
- DOMAIN_MKK = 6,
- DOMAIN_ISRAEL = 7,
- DOMAIN_MKK1 = 8,
- DOMAIN_MKK2 = 9,
- DOMAIN_MKK3 = 10,
- DOMAIN_MAX
-};
-
-#define _TO_DS_ BIT(8)
-#define _FROM_DS_ BIT(9)
-#define _MORE_FRAG_ BIT(10)
-#define _RETRY_ BIT(11)
-#define _PWRMGT_ BIT(12)
-#define _MORE_DATA_ BIT(13)
-#define _PRIVACY_ BIT(14)
-
-#define SetToDs(pbuf) \
- *(__le16 *)(pbuf) |= cpu_to_le16(_TO_DS_)
-
-#define GetToDs(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_TO_DS_)) != 0)
-
-#define SetFrDs(pbuf) \
- *(__le16 *)(pbuf) |= cpu_to_le16(_FROM_DS_)
-
-#define GetFrDs(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_FROM_DS_)) != 0)
-
-#define SetMFrag(pbuf) \
- *(__le16 *)(pbuf) |= cpu_to_le16(_MORE_FRAG_)
-
-#define ClearMFrag(pbuf) \
- *(__le16 *)(pbuf) &= (~cpu_to_le16(_MORE_FRAG_))
-
-#define GetRetry(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_RETRY_)) != 0)
-
-#define SetPwrMgt(pbuf) \
- *(__le16 *)(pbuf) |= cpu_to_le16(_PWRMGT_)
-
-#define GetPwrMgt(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_PWRMGT_)) != 0)
-
-#define SetMData(pbuf) \
- *(__le16 *)(pbuf) |= cpu_to_le16(_MORE_DATA_)
-
-#define SetPrivacy(pbuf) \
- *(__le16 *)(pbuf) |= cpu_to_le16(_PRIVACY_)
-
-#define GetFrameType(pbuf) \
- (le16_to_cpu(*(__le16 *)(pbuf)) & (BIT(3) | BIT(2)))
-
-#define GetFrameSubType(pbuf) (le16_to_cpu(*(__le16 *)(pbuf)) & (BIT(7) |\
- BIT(6) | BIT(5) | BIT(4) | BIT(3) | BIT(2)))
-
-#define SetFrameSubType(pbuf, type) \
- do { \
- *(__le16 *)(pbuf) &= cpu_to_le16(~(BIT(7) | BIT(6) | \
- BIT(5) | BIT(4) | BIT(3) | BIT(2))); \
- *(__le16 *)(pbuf) |= cpu_to_le16(type); \
- } while (0)
-
-#define SetSeqNum(pbuf, num) \
- do { \
- *(__le16 *)((size_t)(pbuf) + 22) = \
- ((*(__le16 *)((size_t)(pbuf) + 22)) & cpu_to_le16((unsigned short)0x000f)) | \
- cpu_to_le16((unsigned short)(0xfff0 & (num << 4))); \
- } while (0)
-
-#define SetDuration(pbuf, dur) \
- *(__le16 *)((size_t)(pbuf) + 2) = cpu_to_le16(0xffff & (dur))
-
-#define SetPriority(pbuf, tid) \
- *(__le16 *)(pbuf) |= cpu_to_le16(tid & 0xf)
-
-#define SetEOSP(pbuf, eosp) \
- *(__le16 *)(pbuf) |= cpu_to_le16((eosp & 1) << 4)
-
-#define SetAckpolicy(pbuf, ack) \
- *(__le16 *)(pbuf) |= cpu_to_le16((ack & 3) << 5)
-
-#define GetAckpolicy(pbuf) (((le16_to_cpu(*(__le16 *)pbuf)) >> 5) & 0x3)
-
-#define GetAMsdu(pbuf) (((le16_to_cpu(*(__le16 *)pbuf)) >> 7) & 0x1)
-
-#define GetAddr1Ptr(pbuf) ((unsigned char *)((size_t)(pbuf) + 4))
-
-#define GetAddr2Ptr(pbuf) ((unsigned char *)((size_t)(pbuf) + 10))
-
-#define GetAddr3Ptr(pbuf) ((unsigned char *)((size_t)(pbuf) + 16))
-
-#define GetAddr4Ptr(pbuf) ((unsigned char *)((size_t)(pbuf) + 24))
-
-static inline unsigned char *get_sa(unsigned char *pframe)
-{
- unsigned char *sa;
- unsigned int to_fr_ds = (GetToDs(pframe) << 1) | GetFrDs(pframe);
-
- switch (to_fr_ds) {
- case 0x00: /* ToDs=0, FromDs=0 */
- sa = GetAddr2Ptr(pframe);
- break;
- case 0x01: /* ToDs=0, FromDs=1 */
- sa = GetAddr3Ptr(pframe);
- break;
- case 0x02: /* ToDs=1, FromDs=0 */
- sa = GetAddr2Ptr(pframe);
- break;
- default: /* ToDs=1, FromDs=1 */
- sa = GetAddr4Ptr(pframe);
- break;
- }
- return sa;
-}
-
-static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
-{
- unsigned char *sa;
- unsigned int to_fr_ds = (GetToDs(pframe) << 1) | GetFrDs(pframe);
-
- switch (to_fr_ds) {
- case 0x00: /* ToDs=0, FromDs=0 */
- sa = GetAddr3Ptr(pframe);
- break;
- case 0x01: /* ToDs=0, FromDs=1 */
- sa = GetAddr2Ptr(pframe);
- break;
- case 0x02: /* ToDs=1, FromDs=0 */
- sa = GetAddr1Ptr(pframe);
- break;
- case 0x03: /* ToDs=1, FromDs=1 */
- sa = GetAddr1Ptr(pframe);
- break;
- default:
- sa = NULL; /* */
- break;
- }
- return sa;
-}
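get_sa() and get_hdr_bssid() collapse the ToDS and FromDS bits of the frame control field into a two-bit selector and use it to choose which of the four 802.11 address slots (byte offsets 4, 10, 16 and 24) holds the source address or BSSID. A standalone illustration of the BSSID case, assuming the frame control word has already been converted to host order (the driver's macros handle the __le16 access):

#include <stdint.h>
#include <stdio.h>

#define FC_TO_DS   (1u << 8)	/* _TO_DS_ above */
#define FC_FROM_DS (1u << 9)	/* _FROM_DS_ above */

/* Returns the address-field index (1..4) that carries the BSSID,
 * following the switch in get_hdr_bssid() above. */
static int bssid_addr_index(uint16_t fc)
{
	unsigned int to_fr_ds = ((fc & FC_TO_DS) ? 2 : 0) |
				((fc & FC_FROM_DS) ? 1 : 0);

	switch (to_fr_ds) {
	case 0x00: return 3;	/* IBSS:    ToDS=0, FromDS=0 */
	case 0x01: return 2;	/* from AP: ToDS=0, FromDS=1 */
	case 0x02: return 1;	/* to AP:   ToDS=1, FromDS=0 */
	default:   return 1;	/* WDS:     ToDS=1, FromDS=1 */
	}
}

int main(void)
{
	printf("ToDS frame   -> Addr%d\n", bssid_addr_index(FC_TO_DS));   /* Addr1 */
	printf("FromDS frame -> Addr%d\n", bssid_addr_index(FC_FROM_DS)); /* Addr2 */
	return 0;
}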
-
-/*-----------------------------------------------------------------------------
- Below is for the security related definition
-------------------------------------------------------------------------------*/
-#define _RESERVED_FRAME_TYPE_ 0
-#define _SKB_FRAME_TYPE_ 2
-#define _PRE_ALLOCMEM_ 1
-#define _PRE_ALLOCHDR_ 3
-#define _PRE_ALLOCLLCHDR_ 4
-#define _PRE_ALLOCICVHDR_ 5
-#define _PRE_ALLOCMICHDR_ 6
-
-#define _SIFSTIME_ \
- (priv->pmib->dot11BssType.net_work_type = 10)
-#define _ACKCTSLNG_ 14 /* 14 bytes long, including crclng */
-#define _CRCLNG_ 4
-
-#define _ASOCREQ_IE_OFFSET_ 4 /* excluding wlan_hdr */
-#define _ASOCRSP_IE_OFFSET_ 6
-#define _REASOCREQ_IE_OFFSET_ 10
-#define _REASOCRSP_IE_OFFSET_ 6
-#define _PROBEREQ_IE_OFFSET_ 0
-#define _PROBERSP_IE_OFFSET_ 12
-#define _AUTH_IE_OFFSET_ 6
-#define _DEAUTH_IE_OFFSET_ 0
-#define _BEACON_IE_OFFSET_ 12
-#define _PUBLIC_ACTION_IE_OFFSET_ 8
-
-#define _FIXED_IE_LENGTH_ _BEACON_IE_OFFSET_
-
-#define _SSID_IE_ 0
-#define _SUPPORTEDRATES_IE_ 1
-#define _DSSET_IE_ 3
-#define _TIM_IE_ 5
-#define _IBSS_PARA_IE_ 6
-#define _COUNTRY_IE_ 7
-#define _CHLGETXT_IE_ 16
-#define _SUPPORTED_CH_IE_ 36
-#define _CH_SWTICH_ANNOUNCE_ 37 /* Secondary Channel Offset */
-#define _RSN_IE_2_ 48
-#define _SSN_IE_1_ 221
-#define _ERPINFO_IE_ 42
-#define _EXT_SUPPORTEDRATES_IE_ 50
-
-#define _HT_CAPABILITY_IE_ 45
-#define _FTIE_ 55
-#define _TIMEOUT_ITVL_IE_ 56
-#define _SRC_IE_ 59
-#define _HT_EXTRA_INFO_IE_ 61
-#define _HT_ADD_INFO_IE_ 61 /* _HT_EXTRA_INFO_IE_ */
-#define _WAPI_IE_ 68
-
-#define EID_BSSCoexistence 72 /* 20/40 BSS Coexistence */
-#define EID_BSSIntolerantChlReport 73
-#define _RIC_Descriptor_IE_ 75
-
-#define _LINK_ID_IE_ 101
-#define _CH_SWITCH_TIMING_ 104
-#define _PTI_BUFFER_STATUS_ 106
-#define _EXT_CAP_IE_ 127
-#define _VENDOR_SPECIFIC_IE_ 221
-
-#define _RESERVED47_ 47
-
-/* ---------------------------------------------------------------------------
- Below is the fixed elements...
------------------------------------------------------------------------------*/
-#define _AUTH_ALGM_NUM_ 2
-#define _AUTH_SEQ_NUM_ 2
-#define _BEACON_ITERVAL_ 2
-#define _CAPABILITY_ 2
-#define _CURRENT_APADDR_ 6
-#define _LISTEN_INTERVAL_ 2
-#define _RSON_CODE_ 2
-#define _ASOC_ID_ 2
-#define _STATUS_CODE_ 2
-#define _TIMESTAMP_ 8
-
-#define cap_ESS BIT(0)
-#define cap_IBSS BIT(1)
-#define cap_CFPollable BIT(2)
-#define cap_CFRequest BIT(3)
-#define cap_Privacy BIT(4)
-#define cap_ShortPremble BIT(5)
-#define cap_PBCC BIT(6)
-#define cap_ChAgility BIT(7)
-#define cap_SpecMgmt BIT(8)
-#define cap_QoSi BIT(9)
-#define cap_ShortSlot BIT(10)
-
-/*-----------------------------------------------------------------------------
- Below is the definition for 802.11i / 802.1x
-------------------------------------------------------------------------------*/
-#define _IEEE8021X_MGT_ 1 /* WPA */
-#define _IEEE8021X_PSK_ 2 /* WPA with pre-shared key */
-
-/*-----------------------------------------------------------------------------
- Below is the definition for WMM
-------------------------------------------------------------------------------*/
-#define _WMM_IE_Length_ 7 /* for WMM STA */
-#define _WMM_Para_Element_Length_ 24
-
-/*-----------------------------------------------------------------------------
- Below is the definition for 802.11n
-------------------------------------------------------------------------------*/
-
-/**
- * struct rtw_ieee80211_bar - HT Block Ack Request
- *
- * This structure refers to "HT BlockAckReq" as
- * described in 802.11n draft section 7.2.1.7.1
- */
-struct rtw_ieee80211_bar {
- __le16 frame_control;
- __le16 duration;
- unsigned char ra[ETH_ALEN];
- unsigned char ta[ETH_ALEN];
- __le16 control;
- __le16 start_seq_num;
-} __packed;
-
-/**
- * struct ieee80211_ht_addt_info - HT additional information
- *
- * This structure refers to "HT information element" as
- * described in 802.11n draft section 7.3.2.53
- */
-struct ieee80211_ht_addt_info {
- unsigned char control_chan;
- unsigned char ht_param;
- __le16 operation_mode;
- __le16 stbc_param;
- unsigned char basic_set[16];
-} __packed;
-
-struct HT_caps_element {
- union {
- struct {
- __le16 HT_caps_info;
- unsigned char AMPDU_para;
- unsigned char MCS_rate[16];
- __le16 HT_ext_caps;
- __le16 Beamforming_caps;
- unsigned char ASEL_caps;
- } HT_cap_element;
- unsigned char HT_cap[26];
- } u;
-} __packed;
-
-struct HT_info_element {
- unsigned char primary_channel;
- unsigned char infos[5];
- unsigned char MCS_rate[16];
-} __packed;
-
-struct AC_param {
- unsigned char ACI_AIFSN;
- unsigned char CW;
- __le16 TXOP_limit;
-} __packed;
-
-struct WMM_para_element {
- unsigned char QoS_info;
- unsigned char reserved;
- struct AC_param ac_param[4];
-} __packed;
-
-#define MAX_AMPDU_FACTOR_64K 3
-
-/* Spatial Multiplexing Power Save Modes */
-#define WLAN_HT_CAP_SM_PS_STATIC 0
-#define WLAN_HT_CAP_SM_PS_DYNAMIC 1
-#define WLAN_HT_CAP_SM_PS_INVALID 2
-#define WLAN_HT_CAP_SM_PS_DISABLED 3
-
-#define OP_MODE_PURE 0
-#define OP_MODE_MAY_BE_LEGACY_STAS 1
-#define OP_MODE_20MHZ_HT_STA_ASSOCED 2
-#define OP_MODE_MIXED 3
-
-#define HT_INFO_HT_PARAM_SECONDARY_CHNL_OFF_MASK ((u8) BIT(0) | BIT(1))
-#define HT_INFO_HT_PARAM_SECONDARY_CHNL_ABOVE ((u8) BIT(0))
-#define HT_INFO_HT_PARAM_SECONDARY_CHNL_BELOW ((u8) BIT(0) | BIT(1))
-#define HT_INFO_HT_PARAM_REC_TRANS_CHNL_WIDTH ((u8) BIT(2))
-#define HT_INFO_HT_PARAM_RIFS_MODE ((u8) BIT(3))
-#define HT_INFO_HT_PARAM_CTRL_ACCESS_ONLY ((u8) BIT(4))
-#define HT_INFO_HT_PARAM_SRV_INTERVAL_GRANULARITY ((u8) BIT(5))
-
-#define HT_INFO_OPERATION_MODE_OP_MODE_MASK \
- ((u16) (0x0001 | 0x0002))
-#define HT_INFO_OPERATION_MODE_OP_MODE_OFFSET 0
-#define HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT ((u8) BIT(2))
-#define HT_INFO_OPERATION_MODE_TRANSMIT_BURST_LIMIT ((u8) BIT(3))
-#define HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT ((u8) BIT(4))
-
-/* ===============WPS Section=============== */
-/* For WPSv1.0 */
-#define WPSOUI 0x0050f204
-/* WPS attribute ID */
-#define WPS_ATTR_VER1 0x104A
-#define WPS_ATTR_SIMPLE_CONF_STATE 0x1044
-#define WPS_ATTR_RESP_TYPE 0x103B
-#define WPS_ATTR_UUID_E 0x1047
-#define WPS_ATTR_MANUFACTURER 0x1021
-#define WPS_ATTR_MODEL_NAME 0x1023
-#define WPS_ATTR_MODEL_NUMBER 0x1024
-#define WPS_ATTR_SERIAL_NUMBER 0x1042
-#define WPS_ATTR_PRIMARY_DEV_TYPE 0x1054
-#define WPS_ATTR_SEC_DEV_TYPE_LIST 0x1055
-#define WPS_ATTR_DEVICE_NAME 0x1011
-#define WPS_ATTR_CONF_METHOD 0x1008
-#define WPS_ATTR_RF_BANDS 0x103C
-#define WPS_ATTR_DEVICE_PWID 0x1012
-#define WPS_ATTR_REQUEST_TYPE 0x103A
-#define WPS_ATTR_ASSOCIATION_STATE 0x1002
-#define WPS_ATTR_CONFIG_ERROR 0x1009
-#define WPS_ATTR_VENDOR_EXT 0x1049
-#define WPS_ATTR_SELECTED_REGISTRAR 0x1041
-
-/* Value of WPS attribute "WPS_ATTR_DEVICE_NAME" */
-#define WPS_MAX_DEVICE_NAME_LEN 32
-
-/* Value of WPS Request Type Attribute */
-#define WPS_REQ_TYPE_ENROLLEE_INFO_ONLY 0x00
-#define WPS_REQ_TYPE_ENROLLEE_OPEN_8021X 0x01
-#define WPS_REQ_TYPE_REGISTRAR 0x02
-#define WPS_REQ_TYPE_WLAN_MANAGER_REGISTRAR 0x03
-
-/* Value of WPS Response Type Attribute */
-#define WPS_RESPONSE_TYPE_INFO_ONLY 0x00
-#define WPS_RESPONSE_TYPE_8021X 0x01
-#define WPS_RESPONSE_TYPE_REGISTRAR 0x02
-#define WPS_RESPONSE_TYPE_AP 0x03
-
-/* Value of WPS WiFi Simple Configuration State Attribute */
-#define WPS_WSC_STATE_NOT_CONFIG 0x01
-#define WPS_WSC_STATE_CONFIG 0x02
-
-/* Value of WPS Version Attribute */
-#define WPS_VERSION_1 0x10
-
-/* Value of WPS Configuration Method Attribute */
-#define WPS_CONFIG_METHOD_FLASH 0x0001
-#define WPS_CONFIG_METHOD_ETHERNET 0x0002
-#define WPS_CONFIG_METHOD_LABEL 0x0004
-#define WPS_CONFIG_METHOD_DISPLAY 0x0008
-#define WPS_CONFIG_METHOD_E_NFC 0x0010
-#define WPS_CONFIG_METHOD_I_NFC 0x0020
-#define WPS_CONFIG_METHOD_NFC 0x0040
-#define WPS_CONFIG_METHOD_PBC 0x0080
-#define WPS_CONFIG_METHOD_KEYPAD 0x0100
-#define WPS_CONFIG_METHOD_VPBC 0x0280
-#define WPS_CONFIG_METHOD_PPBC 0x0480
-#define WPS_CONFIG_METHOD_VDISPLAY 0x2008
-#define WPS_CONFIG_METHOD_PDISPLAY 0x4008
-
-/* Value of Category ID of WPS Primary Device Type Attribute */
-#define WPS_PDT_CID_DISPLAYS 0x0007
-#define WPS_PDT_CID_MULIT_MEDIA 0x0008
-#define WPS_PDT_CID_RTK_WIDI WPS_PDT_CID_MULIT_MEDIA
-
-/* Value of Sub Category ID of WPS Primary Device Type Attribute */
-#define WPS_PDT_SCID_MEDIA_SERVER 0x0005
-#define WPS_PDT_SCID_RTK_DMP WPS_PDT_SCID_MEDIA_SERVER
-
-/* Value of Device Password ID */
-#define WPS_DPID_P 0x0000
-#define WPS_DPID_USER_SPEC 0x0001
-#define WPS_DPID_MACHINE_SPEC 0x0002
-#define WPS_DPID_REKEY 0x0003
-#define WPS_DPID_PBC 0x0004
-#define WPS_DPID_REGISTRAR_SPEC 0x0005
-
-/* Value of WPS RF Bands Attribute */
-#define WPS_RF_BANDS_2_4_GHZ 0x01
-#define WPS_RF_BANDS_5_GHZ 0x02
-
-/* Value of WPS Association State Attribute */
-#define WPS_ASSOC_STATE_NOT_ASSOCIATED 0x00
-#define WPS_ASSOC_STATE_CONNECTION_SUCCESS 0x01
-#define WPS_ASSOC_STATE_CONFIGURATION_FAILURE 0x02
-#define WPS_ASSOC_STATE_ASSOCIATION_FAILURE 0x03
-#define WPS_ASSOC_STATE_IP_FAILURE 0x04
-
-/* =====================P2P Section===================== */
-/* For P2P */
-#define P2POUI 0x506F9A09
-
-/* P2P Attribute ID */
-#define P2P_ATTR_STATUS 0x00
-#define P2P_ATTR_MINOR_REASON_CODE 0x01
-#define P2P_ATTR_CAPABILITY 0x02
-#define P2P_ATTR_DEVICE_ID 0x03
-#define P2P_ATTR_GO_INTENT 0x04
-#define P2P_ATTR_CONF_TIMEOUT 0x05
-#define P2P_ATTR_LISTEN_CH 0x06
-#define P2P_ATTR_GROUP_BSSID 0x07
-#define P2P_ATTR_EX_LISTEN_TIMING 0x08
-#define P2P_ATTR_INTENTED_IF_ADDR 0x09
-#define P2P_ATTR_MANAGEABILITY 0x0A
-#define P2P_ATTR_CH_LIST 0x0B
-#define P2P_ATTR_NOA 0x0C
-#define P2P_ATTR_DEVICE_INFO 0x0D
-#define P2P_ATTR_GROUP_INFO 0x0E
-#define P2P_ATTR_GROUP_ID 0x0F
-#define P2P_ATTR_INTERFACE 0x10
-#define P2P_ATTR_OPERATING_CH 0x11
-#define P2P_ATTR_INVITATION_FLAGS 0x12
-
-/* Value of Status Attribute */
-#define P2P_STATUS_SUCCESS 0x00
-#define P2P_STATUS_FAIL_INFO_UNAVAILABLE 0x01
-#define P2P_STATUS_FAIL_INCOMPATIBLE_PARAM 0x02
-#define P2P_STATUS_FAIL_LIMIT_REACHED 0x03
-#define P2P_STATUS_FAIL_INVALID_PARAM 0x04
-#define P2P_STATUS_FAIL_REQUEST_UNABLE 0x05
-#define P2P_STATUS_FAIL_PREVOUS_PROTO_ERR 0x06
-#define P2P_STATUS_FAIL_NO_COMMON_CH 0x07
-#define P2P_STATUS_FAIL_UNKNOWN_P2PGROUP 0x08
-#define P2P_STATUS_FAIL_BOTH_GOINTENT_15 0x09
-#define P2P_STATUS_FAIL_INCOMPATIBLE_PROVSION 0x0A
-#define P2P_STATUS_FAIL_USER_REJECT 0x0B
-
-/* Value of Invitation Flags Attribute */
-#define P2P_INVITATION_FLAGS_PERSISTENT BIT(0)
-
-#define DMP_P2P_DEVCAP_SUPPORT (P2P_DEVCAP_SERVICE_DISCOVERY | \
- P2P_DEVCAP_CLIENT_DISCOVERABILITY | \
- P2P_DEVCAP_CONCURRENT_OPERATION | \
- P2P_DEVCAP_INVITATION_PROC)
-
-#define DMP_P2P_GRPCAP_SUPPORT (P2P_GRPCAP_INTRABSS)
-
-/* Value of Device Capability Bitmap */
-#define P2P_DEVCAP_SERVICE_DISCOVERY BIT(0)
-#define P2P_DEVCAP_CLIENT_DISCOVERABILITY BIT(1)
-#define P2P_DEVCAP_CONCURRENT_OPERATION BIT(2)
-#define P2P_DEVCAP_INFRA_MANAGED BIT(3)
-#define P2P_DEVCAP_DEVICE_LIMIT BIT(4)
-#define P2P_DEVCAP_INVITATION_PROC BIT(5)
-
-/* Value of Group Capability Bitmap */
-#define P2P_GRPCAP_GO BIT(0)
-#define P2P_GRPCAP_PERSISTENT_GROUP BIT(1)
-#define P2P_GRPCAP_GROUP_LIMIT BIT(2)
-#define P2P_GRPCAP_INTRABSS BIT(3)
-#define P2P_GRPCAP_CROSS_CONN BIT(4)
-#define P2P_GRPCAP_PERSISTENT_RECONN BIT(5)
-#define P2P_GRPCAP_GROUP_FORMATION BIT(6)
-
-/* P2P Public Action Frame (Management Frame) */
-#define P2P_PUB_ACTION_ACTION 0x09
-
-/* P2P Public Action Frame Type */
-#define P2P_GO_NEGO_REQ 0
-#define P2P_GO_NEGO_RESP 1
-#define P2P_GO_NEGO_CONF 2
-#define P2P_INVIT_REQ 3
-#define P2P_INVIT_RESP 4
-#define P2P_DEVDISC_REQ 5
-#define P2P_DEVDISC_RESP 6
-#define P2P_PROVISION_DISC_REQ 7
-#define P2P_PROVISION_DISC_RESP 8
-
-/* P2P Action Frame Type */
-#define P2P_NOTICE_OF_ABSENCE 0
-#define P2P_PRESENCE_REQUEST 1
-#define P2P_PRESENCE_RESPONSE 2
-#define P2P_GO_DISC_REQUEST 3
-
-#define P2P_MAX_PERSISTENT_GROUP_NUM 10
-
-#define P2P_PROVISIONING_SCAN_CNT 3
-
-#define P2P_WILDCARD_SSID_LEN 7
-
-/* default value, used when: (1)p2p disabled or (2)p2p enabled
- * but only do 1 scan phase */
-#define P2P_FINDPHASE_EX_NONE 0
-/* used when p2p enabled and want to do 1 scan phase and
- * P2P_FINDPHASE_EX_MAX-1 find phase */
-#define P2P_FINDPHASE_EX_FULL 1
-#define P2P_FINDPHASE_EX_SOCIAL_FIRST (P2P_FINDPHASE_EX_FULL+1)
-#define P2P_FINDPHASE_EX_MAX 4
-#define P2P_FINDPHASE_EX_SOCIAL_LAST P2P_FINDPHASE_EX_MAX
-
-/* 5 seconds timeout for sending the provision discovery request */
-#define P2P_PROVISION_TIMEOUT 5000
-/* 3 seconds timeout for sending the prov disc request concurrent mode */
-#define P2P_CONCURRENT_PROVISION_TIME 3000
-/* 5 seconds timeout for receiving the group negotiation response */
-#define P2P_GO_NEGO_TIMEOUT 5000
-/* 3 seconds timeout for sending the negotiation request under concurrent mode */
-#define P2P_CONCURRENT_GO_NEGO_TIME 3000
-/* 100ms */
-#define P2P_TX_PRESCAN_TIMEOUT 100
-/* 5 seconds timeout for sending the invitation request */
-#define P2P_INVITE_TIMEOUT 5000
-/* 3 seconds timeout for sending the invitation request under concurrent mode */
-#define P2P_CONCURRENT_INVITE_TIME 3000
-/* 25 seconds timeout to reset the scan channel (based on channel plan) */
-#define P2P_RESET_SCAN_CH 25000
-#define P2P_MAX_INTENT 15
-
-#define P2P_MAX_NOA_NUM 2
-
-/* WPS Configuration Method */
-#define WPS_CM_NONE 0x0000
-#define WPS_CM_LABEL 0x0004
-#define WPS_CM_DISPLYA 0x0008
-#define WPS_CM_EXTERNAL_NFC_TOKEN 0x0010
-#define WPS_CM_INTEGRATED_NFC_TOKEN 0x0020
-#define WPS_CM_NFC_INTERFACE 0x0040
-#define WPS_CM_PUSH_BUTTON 0x0080
-#define WPS_CM_KEYPAD 0x0100
-#define WPS_CM_SW_PUHS_BUTTON 0x0280
-#define WPS_CM_HW_PUHS_BUTTON 0x0480
-#define WPS_CM_SW_DISPLAY_P 0x2008
-#define WPS_CM_LCD_DISPLAY_P 0x4008
-
-enum P2P_ROLE {
- P2P_ROLE_DISABLE = 0,
- P2P_ROLE_DEVICE = 1,
- P2P_ROLE_CLIENT = 2,
- P2P_ROLE_GO = 3
-};
-
-enum P2P_STATE {
- P2P_STATE_NONE = 0, /* P2P disable */
- /* P2P had enabled and do nothing */
- P2P_STATE_IDLE = 1,
- P2P_STATE_LISTEN = 2, /* In pure listen state */
- P2P_STATE_SCAN = 3, /* In scan phase */
- /* In the listen state of find phase */
- P2P_STATE_FIND_PHASE_LISTEN = 4,
- /* In the search state of find phase */
- P2P_STATE_FIND_PHASE_SEARCH = 5,
- /* In P2P provisioning discovery */
- P2P_STATE_TX_PROVISION_DIS_REQ = 6,
- P2P_STATE_RX_PROVISION_DIS_RSP = 7,
- P2P_STATE_RX_PROVISION_DIS_REQ = 8,
- /* Doing the group owner negotiation handshake */
- P2P_STATE_GONEGO_ING = 9,
- /* finish the group negotiation handshake with success */
- P2P_STATE_GONEGO_OK = 10,
- /* finish the group negotiation handshake with failure */
- P2P_STATE_GONEGO_FAIL = 11,
-	/* receiving the P2P Invitation request that matches the profile. */
- P2P_STATE_RECV_INVITE_REQ_MATCH = 12,
- /* Doing the P2P WPS */
- P2P_STATE_PROVISIONING_ING = 13,
- /* Finish the P2P WPS */
- P2P_STATE_PROVISIONING_DONE = 14,
- /* Transmit the P2P Invitation request */
- P2P_STATE_TX_INVITE_REQ = 15,
- /* Receiving the P2P Invitation response */
- P2P_STATE_RX_INVITE_RESP_OK = 16,
-	/* receiving the P2P Invitation request that does not match the profile. */
- P2P_STATE_RECV_INVITE_REQ_DISMATCH = 17,
-	/* receiving the P2P Invitation request while this device is the GO. */
- P2P_STATE_RECV_INVITE_REQ_GO = 18,
-	/* receiving the P2P Invitation request to join an existing P2P Group. */
- P2P_STATE_RECV_INVITE_REQ_JOIN = 19,
-	/* receiving the P2P Invitation response with failure */
- P2P_STATE_RX_INVITE_RESP_FAIL = 20,
- /* receiving p2p negotiation response with information is not available */
- P2P_STATE_RX_INFOR_NOREADY = 21,
- /* sending p2p negotiation response with information is not available */
- P2P_STATE_TX_INFOR_NOREADY = 22,
-};
-
-enum P2P_WPSINFO {
- P2P_NO_WPSINFO = 0,
- P2P_GOT_WPSINFO_PEER_DISPLAY_PIN = 1,
- P2P_GOT_WPSINFO_SELF_DISPLAY_PIN = 2,
- P2P_GOT_WPSINFO_PBC = 3,
-};
-
-#define P2P_PRIVATE_IOCTL_SET_LEN 64
-
-enum P2P_PROTO_WK_ID {
- P2P_FIND_PHASE_WK = 0,
- P2P_RESTORE_STATE_WK = 1,
- P2P_PRE_TX_PROVDISC_PROCESS_WK = 2,
- P2P_PRE_TX_NEGOREQ_PROCESS_WK = 3,
- P2P_PRE_TX_INVITEREQ_PROCESS_WK = 4,
- P2P_AP_P2P_CH_SWITCH_PROCESS_WK = 5,
- P2P_RO_CH_WK = 6,
-};
-
-enum P2P_PS_STATE {
- P2P_PS_DISABLE = 0,
- P2P_PS_ENABLE = 1,
- P2P_PS_SCAN = 2,
- P2P_PS_SCAN_DONE = 3,
- P2P_PS_ALLSTASLEEP = 4, /* for P2P GO */
-};
-
-enum P2P_PS_MODE {
- P2P_PS_NONE = 0,
- P2P_PS_CTWINDOW = 1,
- P2P_PS_NOA = 2,
- P2P_PS_MIX = 3, /* CTWindow and NoA */
-};
-
-#define IP_MCAST_MAC(mac) \
- ((mac[0] == 0x01) && (mac[1] == 0x00) && (mac[2] == 0x5e))
-#define ICMPV6_MCAST_MAC(mac) \
- ((mac[0] == 0x33) && (mac[1] == 0x33) && (mac[2] != 0xff))
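IP_MCAST_MAC() matches the 01:00:5e prefix that IPv4 multicast groups map onto, and ICMPV6_MCAST_MAC() matches the 33:33 prefix used for IPv6 multicast while excluding addresses whose third octet is 0xff. A standalone check of both predicates on sample group addresses:

#include <stdio.h>

#define IP_MCAST_MAC(mac) \
	((mac[0] == 0x01) && (mac[1] == 0x00) && (mac[2] == 0x5e))
#define ICMPV6_MCAST_MAC(mac) \
	((mac[0] == 0x33) && (mac[1] == 0x33) && (mac[2] != 0xff))

int main(void)
{
	unsigned char v4grp[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb }; /* 224.0.0.251 */
	unsigned char v6grp[6] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 }; /* ff02::1 */

	printf("IPv4 mcast MAC: %d\n", IP_MCAST_MAC(v4grp));     /* 1 */
	printf("IPv6 mcast MAC: %d\n", ICMPV6_MCAST_MAC(v6grp)); /* 1 */
	return 0;
}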
-
-#endif /* _WIFI_H_ */
diff --git a/drivers/staging/r8188eu/include/wlan_bssdef.h b/drivers/staging/r8188eu/include/wlan_bssdef.h
deleted file mode 100644
index ffeafa19ef26..000000000000
--- a/drivers/staging/r8188eu/include/wlan_bssdef.h
+++ /dev/null
@@ -1,272 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __WLAN_BSSDEF_H__
-#define __WLAN_BSSDEF_H__
-
-#define MAX_IE_SZ 768
-
-#define NDIS_802_11_LENGTH_SSID 32
-#define NDIS_802_11_LENGTH_RATES 8
-#define NDIS_802_11_LENGTH_RATES_EX 16
-
-#define NDIS_802_11_RSSI long /* in dBm */
-
-struct ndis_802_11_ssid {
- u32 SsidLength;
- u8 Ssid[32];
-};
-
-struct ndis_802_11_config_fh {
- u32 Length; /* Length of structure */
- u32 HopPattern; /* As defined by 802.11, MSB set */
- u32 HopSet; /* to one if non-802.11 */
- u32 DwellTime; /* units are Kusec */
-};
-
-/*
- * FW will only save the channel number in DSConfig.
- * ODI Handler will convert the channel number to freq. number.
- */
-struct ndis_802_11_config {
- u32 Length; /* Length of structure */
- u32 BeaconPeriod; /* units are Kusec */
- u32 ATIMWindow; /* units are Kusec */
- u32 DSConfig; /* Frequency, units are kHz */
- struct ndis_802_11_config_fh FHConfig;
-};
-
-enum ndis_802_11_network_infra {
- Ndis802_11IBSS,
- Ndis802_11Infrastructure,
- Ndis802_11AutoUnknown,
- Ndis802_11InfrastructureMax, /* dummy upper bound */
- Ndis802_11APMode
-};
-
-struct ndis_802_11_fixed_ie {
- u8 Timestamp[8];
- u16 BeaconInterval;
- u16 Capabilities;
-};
-
-struct ndis_802_11_var_ie {
- u8 ElementID;
- u8 Length;
- u8 data[];
-};
-
-/*
-	 * Length is the 4-byte-aligned sum of
- * [ETH_ALEN] + 2 + sizeof (struct ndis_802_11_ssid) + sizeof (u32)
- * + sizeof (NDIS_802_11_RSSI) + sizeof (enum NDIS_802_11_NETWORK_TYPE)
- * + sizeof (struct ndis_802_11_config)
- * + NDIS_802_11_LENGTH_RATES_EX + IELength
- *
- * Except the IELength, all other fields are fixed length.
- * Therefore, we can define a macro to represent the partial sum. */
-
-enum ndis_802_11_auth_mode {
- Ndis802_11AuthModeOpen,
- Ndis802_11AuthModeShared,
- Ndis802_11AuthModeAutoSwitch,
- Ndis802_11AuthModeWPA,
- Ndis802_11AuthModeWPAPSK,
- Ndis802_11AuthModeWPANone,
- Ndis802_11AuthModeWAPI,
- Ndis802_11AuthModeMax /* Not a real mode, upper bound */
-};
-
-enum ndis_802_11_wep_status {
- Ndis802_11WEPEnabled,
- Ndis802_11Encryption1Enabled = Ndis802_11WEPEnabled,
- Ndis802_11WEPDisabled,
- Ndis802_11EncryptionDisabled = Ndis802_11WEPDisabled,
- Ndis802_11WEPKeyAbsent,
- Ndis802_11Encryption1KeyAbsent = Ndis802_11WEPKeyAbsent,
- Ndis802_11WEPNotSupported,
- Ndis802_11EncryptionNotSupported = Ndis802_11WEPNotSupported,
- Ndis802_11Encryption2Enabled,
- Ndis802_11Encryption2KeyAbsent,
- Ndis802_11Encryption3Enabled,
- Ndis802_11Encryption3KeyAbsent,
- Ndis802_11_EncryptionWAPI
-};
-
-#define NDIS_802_11_AI_REQFI_CAPABILITIES 1
-#define NDIS_802_11_AI_REQFI_LISTENINTERVAL 2
-#define NDIS_802_11_AI_REQFI_CURRENTAPADDRESS 4
-
-#define NDIS_802_11_AI_RESFI_CAPABILITIES 1
-#define NDIS_802_11_AI_RESFI_STATUSCODE 2
-#define NDIS_802_11_AI_RESFI_ASSOCIATIONID 4
-
-struct ndis_802_11_ai_reqfi {
- u16 Capabilities;
- u16 ListenInterval;
- unsigned char CurrentAPAddress[ETH_ALEN];
-};
-
-struct ndis_802_11_ai_resfi {
- u16 Capabilities;
- u16 StatusCode;
- u16 AssociationId;
-};
-
-struct ndis_802_11_assoc_info {
- u32 Length;
- u16 AvailableRequestFixedIEs;
- struct ndis_802_11_ai_reqfi RequestFixedIEs;
- u32 RequestIELength;
- u32 OffsetRequestIEs;
- u16 AvailableResponseFixedIEs;
- struct ndis_802_11_ai_resfi ResponseFixedIEs;
- u32 ResponseIELength;
- u32 OffsetResponseIEs;
-};
-
-/* Key mapping keys require a BSSID */
-struct ndis_802_11_key {
- u32 Length; /* Length of this structure */
- u32 KeyIndex;
- u32 KeyLength; /* length of key in bytes */
- unsigned char BSSID[ETH_ALEN];
- unsigned long long KeyRSC;
- u8 KeyMaterial[32]; /* var len depending on above field */
-};
-
-struct ndis_802_11_remove_key {
- u32 Length; /* Length */
- u32 KeyIndex;
- unsigned char BSSID[ETH_ALEN];
-};
-
-struct ndis_802_11_wep {
- u32 Length; /* Length of this structure */
- u32 KeyIndex; /* 0 is the per-client key,
- * 1-N are the global keys */
- u32 KeyLength; /* length of key in bytes */
- u8 KeyMaterial[16];/* variable len depending on above field */
-};
-
-struct ndis_802_11_auth_req {
- u32 Length; /* Length of structure */
- unsigned char Bssid[ETH_ALEN];
- u32 Flags;
-};
-
-enum ndis_802_11_status_type {
- Ndis802_11StatusType_Authentication,
- Ndis802_11StatusType_MediaStreamMode,
- Ndis802_11StatusType_PMKID_CandidateList,
- Ndis802_11StatusTypeMax /* not a real type, defined as
- * an upper bound */
-};
-
-struct ndis_802_11_status_ind {
- enum ndis_802_11_status_type StatusType;
-};
-
-/* mask for authentication/integrity fields */
-#define NDIS_802_11_AUTH_REQUEST_AUTH_FIELDS 0x0f
-#define NDIS_802_11_AUTH_REQUEST_REAUTH 0x01
-#define NDIS_802_11_AUTH_REQUEST_KEYUPDATE 0x02
-#define NDIS_802_11_AUTH_REQUEST_PAIRWISE_ERROR 0x06
-#define NDIS_802_11_AUTH_REQUEST_GROUP_ERROR 0x0E
-
-/* MIC check time, 60 seconds. */
-#define MIC_CHECK_TIME 60000000
-
-#ifndef Ndis802_11APMode
-#define Ndis802_11APMode (Ndis802_11InfrastructureMax+1)
-#endif
-
-struct wlan_phy_info {
-	u8 SignalStrength;/* in percentage */
-	u8 SignalQuality;/* in percentage */
- u8 Optimum_antenna; /* for Antenna diversity */
- u8 Reserved_0;
-};
-
-struct wlan_bcn_info {
-	/* this info comes from rtw_get_encrypt_info when
-	 * translating scan results for the UI */
- u8 encryp_protocol;/* ENCRYP_PROTOCOL_E: OPEN/WEP/WPA/WPA2/WAPI */
- int group_cipher; /* WPA/WPA2 group cipher */
- int pairwise_cipher;/* WPA/WPA2/WEP pairwise cipher */
- int is_8021x;
-
- /* bwmode 20/40 and ch_offset UP/LOW */
- unsigned short ht_cap_info;
- unsigned char ht_info_infos_0;
-};
-
-/* temporarily add #pragma pack for structure alignment issue of
-* struct wlan_bssid_ex and get_wlan_bssid_ex_sz()
-*/
-struct wlan_bssid_ex {
- u32 Length;
- unsigned char MacAddress[ETH_ALEN];
-	u8 Reserved[2];/* [0]: is beacon frame */
- struct ndis_802_11_ssid Ssid;
- u32 Privacy;
-	NDIS_802_11_RSSI Rssi;/* in dBm, raw data, from PHY */
- struct ndis_802_11_config Configuration;
- enum ndis_802_11_network_infra InfrastructureMode;
- unsigned char SupportedRates[NDIS_802_11_LENGTH_RATES_EX];
- struct wlan_phy_info PhyInfo;
- u32 IELength;
- u8 IEs[MAX_IE_SZ]; /* timestamp, beacon interval, and
- * capability information) */
-} __packed;
-
-static inline uint get_wlan_bssid_ex_sz(struct wlan_bssid_ex *bss)
-{
- return sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + bss->IELength;
-}
-
-struct wlan_network {
- struct list_head list;
- int network_type; /* refer to ieee80211.h for WIRELESS_11B/G */
- int fixed; /* set fixed when not to be removed
- * in site-surveying */
- unsigned long last_scanned; /* timestamp for the network */
-	int aid; /* will only be valid when a BSS is joined. */
- int join_res;
- struct wlan_bssid_ex network; /* must be the last item */
- struct wlan_bcn_info BcnInfo;
-};
-
-enum VRTL_CARRIER_SENSE {
- DISABLE_VCS,
- ENABLE_VCS,
- AUTO_VCS
-};
-
-enum VCS_TYPE {
- NONE_VCS,
- RTS_CTS,
- CTS_TO_SELF
-};
-
-#define PWR_CAM 0
-#define PWR_MINPS 1
-#define PWR_MAXPS 2
-#define PWR_UAPSD 3
-#define PWR_VOIP 4
-
-enum UAPSD_MAX_SP {
- NO_LIMIT,
- TWO_MSDU,
- FOUR_MSDU,
- SIX_MSDU
-};
-
-#define NUM_PRE_AUTH_KEY 16
-#define NUM_PMKID_CACHE NUM_PRE_AUTH_KEY
-
-u8 key_2char2num(u8 hch, u8 lch);
-u8 key_char2num(u8 ch);
-u8 str_2char2num(u8 hch, u8 lch);
-
-#endif /* ifndef WLAN_BSSDEF_H_ */
diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
deleted file mode 100644
index e0a819970546..000000000000
--- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c
+++ /dev/null
@@ -1,3775 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/wlan_bssdef.h"
-#include "../include/wifi.h"
-#include "../include/rtw_mlme.h"
-#include "../include/rtw_mlme_ext.h"
-#include "../include/rtw_ioctl.h"
-#include "../include/rtw_ioctl_set.h"
-#include "../include/usb_ops.h"
-#include "../include/rtl8188e_hal.h"
-#include "../include/rtw_led.h"
-
-#include "../include/rtw_iol.h"
-
-#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV + 30)
-
-#define SCAN_ITEM_SIZE 768
-#define MAX_CUSTOM_LEN 64
-#define RATE_COUNT 4
-
-/* combo scan */
-#define WEXT_CSCAN_AMOUNT 9
-#define WEXT_CSCAN_BUF_LEN 360
-#define WEXT_CSCAN_HEADER "CSCAN S\x01\x00\x00S\x00"
-#define WEXT_CSCAN_HEADER_SIZE 12
-#define WEXT_CSCAN_SSID_SECTION 'S'
-#define WEXT_CSCAN_CHANNEL_SECTION 'C'
-#define WEXT_CSCAN_NPROBE_SECTION 'N'
-#define WEXT_CSCAN_ACTV_DWELL_SECTION 'A'
-#define WEXT_CSCAN_PASV_DWELL_SECTION 'P'
-#define WEXT_CSCAN_HOME_DWELL_SECTION 'H'
-#define WEXT_CSCAN_TYPE_SECTION 'T'
-
-static u32 rtw_rates[] = {1000000, 2000000, 5500000, 11000000,
- 6000000, 9000000, 12000000, 18000000, 24000000, 36000000,
- 48000000, 54000000};
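These are the legacy 802.11b/g rates in bit/s. On the air the same rates travel in the Supported Rates IE as one byte each, in units of 500 kb/s, with bit 0x80 marking a basic rate; that is why translate_scan() further down masks with 0x7F and prints rate >> 1 with an optional ".5". A standalone decoding of a typical rate set:

#include <stdio.h>

int main(void)
{
	/* 1(B) 2(B) 5.5 11 Mb/s encoded in 500 kb/s units, 0x80 = basic rate */
	unsigned char rates[] = { 0x82, 0x84, 0x0b, 0x16 };
	unsigned int i;

	for (i = 0; i < sizeof(rates); i++) {
		unsigned int r = rates[i] & 0x7F;

		printf("%u%s Mb/s%s\n", r >> 1, (r & 1) ? ".5" : "",
		       (rates[i] & 0x80) ? " (basic)" : "");
	}
	return 0;
}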
-
-void indicate_wx_scan_complete_event(struct adapter *padapter)
-{
- union iwreq_data wrqu;
-
- memset(&wrqu, 0, sizeof(union iwreq_data));
- wireless_send_event(padapter->pnetdev, SIOCGIWSCAN, &wrqu, NULL);
-}
-
-void rtw_indicate_wx_assoc_event(struct adapter *padapter)
-{
- union iwreq_data wrqu;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- memset(&wrqu, 0, sizeof(union iwreq_data));
-
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-
- memcpy(wrqu.ap_addr.sa_data, pmlmepriv->cur_network.network.MacAddress, ETH_ALEN);
-
- wireless_send_event(padapter->pnetdev, SIOCGIWAP, &wrqu, NULL);
-}
-
-void rtw_indicate_wx_disassoc_event(struct adapter *padapter)
-{
- union iwreq_data wrqu;
-
- memset(&wrqu, 0, sizeof(union iwreq_data));
-
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
-
- wireless_send_event(padapter->pnetdev, SIOCGIWAP, &wrqu, NULL);
-}
-
-static char *translate_scan(struct adapter *padapter,
- struct iw_request_info *info,
- struct wlan_network *pnetwork,
- char *start, char *stop)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct iw_event iwe;
- u16 cap;
- __le16 le_tmp;
- u32 ht_ielen = 0;
- char *custom;
- char *p;
- u16 max_rate = 0, rate, ht_cap = false;
- u32 i = 0;
- u8 bw_40MHz = 0, short_GI = 0;
- u16 mcs_rate = 0;
- u8 ss, sq;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
- u32 blnGotP2PIE = false;
-
- /* User is doing the P2P device discovery */
- /* The prefix of SSID should be "DIRECT-" and the IE should contains the P2P IE. */
- /* If not, the driver should ignore this AP and go to the next AP. */
-
- /* Verifying the SSID */
- if (!memcmp(pnetwork->network.Ssid.Ssid, pwdinfo->p2p_wildcard_ssid, P2P_WILDCARD_SSID_LEN)) {
- u32 p2pielen = 0;
-
- if (pnetwork->network.Reserved[0] == 2) {/* Probe Request */
- /* Verifying the P2P IE */
- if (rtw_get_p2p_ie(pnetwork->network.IEs, pnetwork->network.IELength, NULL, &p2pielen))
- blnGotP2PIE = true;
-			} else {/* Beacon or Probe Response */
- /* Verifying the P2P IE */
- if (rtw_get_p2p_ie(&pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &p2pielen))
- blnGotP2PIE = true;
- }
- }
-
- if (!blnGotP2PIE)
- return start;
- }
-
- /* AP MAC address */
- iwe.cmd = SIOCGIWAP;
- iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
-
- memcpy(iwe.u.ap_addr.sa_data, pnetwork->network.MacAddress, ETH_ALEN);
- start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN);
-
- /* Add the ESSID */
- iwe.cmd = SIOCGIWESSID;
- iwe.u.data.flags = 1;
- iwe.u.data.length = min_t(u16, pnetwork->network.Ssid.SsidLength, 32);
- start = iwe_stream_add_point(info, start, stop, &iwe, pnetwork->network.Ssid.Ssid);
-
- /* parsing HT_CAP_IE */
- p = rtw_get_ie(&pnetwork->network.IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pnetwork->network.IELength - 12);
-
- if (p && ht_ielen > 0) {
- struct ieee80211_ht_cap *pht_capie;
-
- ht_cap = true;
- pht_capie = (struct ieee80211_ht_cap *)(p + 2);
- memcpy(&mcs_rate, pht_capie->mcs.rx_mask, 2);
- bw_40MHz = (le16_to_cpu(pht_capie->cap_info) &
- IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0;
- short_GI = (le16_to_cpu(pht_capie->cap_info) &
- (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40)) ? 1 : 0;
- }
-
- /* Add the protocol name */
- iwe.cmd = SIOCGIWNAME;
- if ((rtw_is_cckratesonly_included((u8 *)&pnetwork->network.SupportedRates))) {
- if (ht_cap)
- snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bn");
- else
- snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11b");
- } else if ((rtw_is_cckrates_included((u8 *)&pnetwork->network.SupportedRates))) {
- if (ht_cap)
- snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bgn");
- else
- snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bg");
- } else {
- if (ht_cap)
- snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11gn");
- else
- snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11g");
- }
-
- start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN);
-
- /* Add mode */
- iwe.cmd = SIOCGIWMODE;
- memcpy(&le_tmp, rtw_get_capability_from_ie(pnetwork->network.IEs), 2);
-
- cap = le16_to_cpu(le_tmp);
-
- if (cap & (WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_BSS)) {
- if (cap & WLAN_CAPABILITY_BSS)
- iwe.u.mode = IW_MODE_MASTER;
- else
- iwe.u.mode = IW_MODE_ADHOC;
-
- start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_UINT_LEN);
- }
-
- if (pnetwork->network.Configuration.DSConfig < 1)
- pnetwork->network.Configuration.DSConfig = 1;
-
- /* Add frequency/channel */
- iwe.cmd = SIOCGIWFREQ;
- iwe.u.freq.m = rtw_ch2freq(pnetwork->network.Configuration.DSConfig) * 100000;
- iwe.u.freq.e = 1;
- iwe.u.freq.i = pnetwork->network.Configuration.DSConfig;
- start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_FREQ_LEN);
-
- /* Add encryption capability */
- iwe.cmd = SIOCGIWENCODE;
- if (cap & WLAN_CAPABILITY_PRIVACY)
- iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
- else
- iwe.u.data.flags = IW_ENCODE_DISABLED;
- iwe.u.data.length = 0;
- start = iwe_stream_add_point(info, start, stop, &iwe, pnetwork->network.Ssid.Ssid);
-
- /*Add basic and extended rates */
- max_rate = 0;
- custom = kzalloc(MAX_CUSTOM_LEN, GFP_ATOMIC);
- if (!custom)
- return start;
- p = custom;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
- while (pnetwork->network.SupportedRates[i] != 0) {
- rate = pnetwork->network.SupportedRates[i] & 0x7F;
- if (rate > max_rate)
- max_rate = rate;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
- "%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
- i++;
- }
-
- if (ht_cap) {
- if (mcs_rate & 0x8000)/* MCS15 */
- max_rate = (bw_40MHz) ? ((short_GI) ? 300 : 270) : ((short_GI) ? 144 : 130);
- else if (mcs_rate & 0x0080)/* MCS7 */
- ;
- else/* default MCS7 */
- max_rate = (bw_40MHz) ? ((short_GI) ? 150 : 135) : ((short_GI) ? 72 : 65);
-
- max_rate = max_rate * 2;/* Mbps/2; */
- }
-
- iwe.cmd = SIOCGIWRATE;
- iwe.u.bitrate.fixed = 0;
- iwe.u.bitrate.disabled = 0;
- iwe.u.bitrate.value = max_rate * 500000;
- start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_PARAM_LEN);
-
- /* parsing WPA/WPA2 IE */
- {
- u8 *buf;
- u8 *wpa_ie, *rsn_ie;
- u16 wpa_len = 0, rsn_len = 0;
- u8 *p;
-
- buf = kzalloc(MAX_WPA_IE_LEN, GFP_ATOMIC);
- if (!buf)
- goto exit;
- wpa_ie = kzalloc(255, GFP_ATOMIC);
- if (!wpa_ie) {
- kfree(buf);
- goto exit;
- }
- rsn_ie = kzalloc(255, GFP_ATOMIC);
- if (!rsn_ie) {
- kfree(buf);
- kfree(wpa_ie);
- goto exit;
- }
- rtw_get_sec_ie(pnetwork->network.IEs, pnetwork->network.IELength, rsn_ie, &rsn_len, wpa_ie, &wpa_len);
-
- if (wpa_len > 0) {
- p = buf;
- memset(buf, 0, MAX_WPA_IE_LEN);
- p += sprintf(p, "wpa_ie =");
- for (i = 0; i < wpa_len; i++)
- p += sprintf(p, "%02x", wpa_ie[i]);
-
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVCUSTOM;
- iwe.u.data.length = strlen(buf);
- start = iwe_stream_add_point(info, start, stop, &iwe, buf);
-
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = wpa_len;
- start = iwe_stream_add_point(info, start, stop, &iwe, wpa_ie);
- }
- if (rsn_len > 0) {
- p = buf;
- memset(buf, 0, MAX_WPA_IE_LEN);
- p += sprintf(p, "rsn_ie =");
- for (i = 0; i < rsn_len; i++)
- p += sprintf(p, "%02x", rsn_ie[i]);
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVCUSTOM;
- iwe.u.data.length = strlen(buf);
- start = iwe_stream_add_point(info, start, stop, &iwe, buf);
-
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = rsn_len;
- start = iwe_stream_add_point(info, start, stop, &iwe, rsn_ie);
- }
- kfree(buf);
- kfree(wpa_ie);
- kfree(rsn_ie);
- }
-
- {/* parsing WPS IE */
- uint cnt = 0, total_ielen;
- u8 *wpsie_ptr = NULL;
- uint wps_ielen = 0;
-
- u8 *ie_ptr = pnetwork->network.IEs + _FIXED_IE_LENGTH_;
- total_ielen = pnetwork->network.IELength - _FIXED_IE_LENGTH_;
-
- while (cnt < total_ielen) {
- if (rtw_is_wps_ie(&ie_ptr[cnt], &wps_ielen) && (wps_ielen > 2)) {
- wpsie_ptr = &ie_ptr[cnt];
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = (u16)wps_ielen;
- start = iwe_stream_add_point(info, start, stop, &iwe, wpsie_ptr);
- }
- cnt += ie_ptr[cnt + 1] + 2; /* goto next */
- }
- }
-
- /* Add quality statistics */
- iwe.cmd = IWEVQUAL;
- iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_INVALID;
-
- if (check_fwstate(pmlmepriv, _FW_LINKED) &&
- is_same_network(&pmlmepriv->cur_network.network, &pnetwork->network)) {
- ss = padapter->recvpriv.signal_strength;
- sq = padapter->recvpriv.signal_qual;
- } else {
- ss = pnetwork->network.PhyInfo.SignalStrength;
- sq = pnetwork->network.PhyInfo.SignalQuality;
- }
-
- iwe.u.qual.level = (u8)ss;
- iwe.u.qual.qual = (u8)sq; /* signal quality */
- iwe.u.qual.noise = 0; /* noise level */
- start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN);
-exit:
- kfree(custom);
- return start;
-}
-
-static int wpa_set_auth_algs(struct net_device *dev, u32 value)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int ret = 0;
-
- if ((value & AUTH_ALG_SHARED_KEY) && (value & AUTH_ALG_OPEN_SYSTEM)) {
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeAutoSwitch;
- padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Auto;
- } else if (value & AUTH_ALG_SHARED_KEY) {
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
-
- padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeShared;
- padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Shared;
- } else if (value & AUTH_ALG_OPEN_SYSTEM) {
- if (padapter->securitypriv.ndisauthtype < Ndis802_11AuthModeWPAPSK) {
- padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
- padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
- }
- } else if (!(value & AUTH_ALG_LEAP)) {
- ret = -EINVAL;
- }
- return ret;
-}
-
-static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
-{
- int ret = 0;
- u32 wep_key_idx, wep_key_len, wep_total_len;
- struct ndis_802_11_wep *pwep = NULL;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- param->u.crypt.err = 0;
- param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
-
- if (param_len < (u32)((u8 *)param->u.crypt.key - (u8 *)param) + param->u.crypt.key_len) {
- ret = -EINVAL;
- goto exit;
- }
-
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
- if (param->u.crypt.idx >= WEP_KEYS) {
- ret = -EINVAL;
- goto exit;
- }
- } else {
- ret = -EINVAL;
- goto exit;
- }
-
- if (strcmp(param->u.crypt.alg, "WEP") == 0) {
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
- padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
-
- wep_key_idx = param->u.crypt.idx;
- wep_key_len = param->u.crypt.key_len;
-
- if (wep_key_idx > WEP_KEYS)
- return -EINVAL;
-
- if (wep_key_len > 0) {
- wep_key_len = wep_key_len <= 5 ? 5 : 13;
- wep_total_len = wep_key_len + sizeof(*pwep);
- pwep = kzalloc(wep_total_len, GFP_KERNEL);
- if (!pwep)
- goto exit;
-
- pwep->KeyLength = wep_key_len;
- pwep->Length = wep_total_len;
- if (wep_key_len == 13) {
- padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
- padapter->securitypriv.dot118021XGrpPrivacy = _WEP104_;
- }
- } else {
- ret = -EINVAL;
- goto exit;
- }
- pwep->KeyIndex = wep_key_idx;
- pwep->KeyIndex |= 0x80000000;
- memcpy(pwep->KeyMaterial, param->u.crypt.key, pwep->KeyLength);
- if (param->u.crypt.set_tx) {
- if (rtw_set_802_11_add_wep(padapter, pwep) == (u8)_FAIL)
- ret = -EOPNOTSUPP;
- } else {
- if (wep_key_idx >= WEP_KEYS) {
- ret = -EOPNOTSUPP;
- goto exit;
- }
- memcpy(&psecuritypriv->dot11DefKey[wep_key_idx].skey[0], pwep->KeyMaterial, pwep->KeyLength);
- psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->KeyLength;
- rtw_set_key(padapter, psecuritypriv, wep_key_idx, 0);
- }
- goto exit;
- }
-
- if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) { /* 802_1x */
- struct sta_info *psta, *pbcmc_sta;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_MP_STATE)) { /* sta mode */
- psta = rtw_get_stainfo(pstapriv, get_bssid(pmlmepriv));
- if (!psta) {
- ;
- } else {
- if (strcmp(param->u.crypt.alg, "none") != 0)
- psta->ieee8021x_blocked = false;
-
- if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
- (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled))
- psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
-
- if (param->u.crypt.set_tx == 1) { /* pairwise key */
- memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
-
- if (strcmp(param->u.crypt.alg, "TKIP") == 0) { /* set mic key */
- memcpy(psta->dot11tkiptxmickey.skey, &param->u.crypt.key[16], 8);
- memcpy(psta->dot11tkiprxmickey.skey, &param->u.crypt.key[24], 8);
- padapter->securitypriv.busetkipkey = false;
- }
-
- rtw_setstakey_cmd(padapter, (unsigned char *)psta, true);
- } else { /* group key */
- memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
- memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[16], 8);
- memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[24], 8);
- padapter->securitypriv.binstallGrpkey = true;
-
- padapter->securitypriv.dot118021XGrpKeyid = param->u.crypt.idx;
-
- rtw_set_key(padapter, &padapter->securitypriv, param->u.crypt.idx, 1);
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_PROVISIONING_ING))
- rtw_p2p_set_state(pwdinfo, P2P_STATE_PROVISIONING_DONE);
- }
- }
- pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
- if (!pbcmc_sta) {
- ;
- } else {
- /* Jeff: don't disable ieee8021x_blocked while clearing key */
- if (strcmp(param->u.crypt.alg, "none") != 0)
- pbcmc_sta->ieee8021x_blocked = false;
-
- if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
- (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled))
- pbcmc_sta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
- }
- }
- }
-
-exit:
-
- kfree(pwep);
-
- return ret;
-}
-
-static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ielen)
-{
- u8 *buf = NULL;
- int group_cipher = 0, pairwise_cipher = 0;
- int ret = 0;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- if (ielen > MAX_WPA_IE_LEN || !pie) {
- _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
- if (!pie)
- return ret;
- else
- return -EINVAL;
- }
-
- if (ielen) {
- buf = kmemdup(pie, ielen, GFP_KERNEL);
- if (!buf) {
- ret = -ENOMEM;
- goto exit;
- }
-
- if (ielen < RSN_HEADER_LEN) {
- ret = -1;
- goto exit;
- }
-
- if (rtw_parse_wpa_ie(buf, ielen, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
- padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
- padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPAPSK;
- memcpy(padapter->securitypriv.supplicant_ie, &buf[0], ielen);
- }
-
- if (rtw_parse_wpa2_ie(buf, ielen, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
- padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
- padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPA2PSK;
- memcpy(padapter->securitypriv.supplicant_ie, &buf[0], ielen);
- }
-
- switch (group_cipher) {
- case WPA_CIPHER_NONE:
- padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
- break;
- case WPA_CIPHER_WEP40:
- padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
- case WPA_CIPHER_TKIP:
- padapter->securitypriv.dot118021XGrpPrivacy = _TKIP_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
- break;
- case WPA_CIPHER_CCMP:
- padapter->securitypriv.dot118021XGrpPrivacy = _AES_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
- break;
- case WPA_CIPHER_WEP104:
- padapter->securitypriv.dot118021XGrpPrivacy = _WEP104_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
- }
-
- switch (pairwise_cipher) {
- case WPA_CIPHER_NONE:
- padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
- break;
- case WPA_CIPHER_WEP40:
- padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
- case WPA_CIPHER_TKIP:
- padapter->securitypriv.dot11PrivacyAlgrthm = _TKIP_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
- break;
- case WPA_CIPHER_CCMP:
- padapter->securitypriv.dot11PrivacyAlgrthm = _AES_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
- break;
- case WPA_CIPHER_WEP104:
- padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
- }
-
- _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
- {/* set wps_ie */
- u16 cnt = 0;
- u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
-
- while (cnt < ielen) {
- eid = buf[cnt];
- if ((eid == _VENDOR_SPECIFIC_IE_) && (!memcmp(&buf[cnt + 2], wps_oui, 4))) {
- padapter->securitypriv.wps_ie_len = ((buf[cnt + 1] + 2) < (MAX_WPA_IE_LEN << 2)) ? (buf[cnt + 1] + 2) : (MAX_WPA_IE_LEN << 2);
-
- memcpy(padapter->securitypriv.wps_ie, &buf[cnt], padapter->securitypriv.wps_ie_len);
-
- set_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS);
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_OK))
- rtw_p2p_set_state(pwdinfo, P2P_STATE_PROVISIONING_ING);
- cnt += buf[cnt + 1] + 2;
- break;
- } else {
- cnt += buf[cnt + 1] + 2; /* goto next */
- }
- }
- }
- }
-
-exit:
- kfree(buf);
- return ret;
-}
-
-typedef unsigned char NDIS_802_11_RATES_EX[NDIS_802_11_LENGTH_RATES_EX];
-
-static int rtw_wx_get_name(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- u32 ht_ielen = 0;
- char *p;
- u8 ht_cap = false;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
- NDIS_802_11_RATES_EX *prates = NULL;
-
- if (check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE)) {
- /* parsing HT_CAP_IE */
- p = rtw_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pcur_bss->IELength - 12);
- if (p && ht_ielen > 0)
- ht_cap = true;
-
- prates = &pcur_bss->SupportedRates;
-
- if (rtw_is_cckratesonly_included((u8 *)prates)) {
- if (ht_cap)
- snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bn");
- else
- snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11b");
- } else if (rtw_is_cckrates_included((u8 *)prates)) {
- if (ht_cap)
- snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bgn");
- else
- snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bg");
- } else {
- if (ht_cap)
- snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11gn");
- else
- snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11g");
- }
- } else {
- snprintf(wrqu->name, IFNAMSIZ, "unassociated");
- }
-
-
-
- return 0;
-}
-
-static int rtw_wx_get_freq(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
-
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- /* wrqu->freq.m = ieee80211_wlan_frequencies[pcur_bss->Configuration.DSConfig-1] * 100000; */
- wrqu->freq.m = rtw_ch2freq(pcur_bss->Configuration.DSConfig) * 100000;
- wrqu->freq.e = 1;
- wrqu->freq.i = pcur_bss->Configuration.DSConfig;
- } else {
- wrqu->freq.m = rtw_ch2freq(padapter->mlmeextpriv.cur_channel) * 100000;
- wrqu->freq.e = 1;
- wrqu->freq.i = padapter->mlmeextpriv.cur_channel;
- }
-
- return 0;
-}
-
-static int rtw_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- enum ndis_802_11_network_infra networkType;
- int ret = 0;
-
- ret = rtw_pwr_wakeup(padapter);
- if (ret)
- goto exit;
-
- if (!padapter->hw_init_completed) {
- ret = -EPERM;
- goto exit;
- }
-
- switch (wrqu->mode) {
- case IW_MODE_AUTO:
- networkType = Ndis802_11AutoUnknown;
- break;
- case IW_MODE_ADHOC:
- networkType = Ndis802_11IBSS;
- break;
- case IW_MODE_MASTER:
- networkType = Ndis802_11APMode;
- break;
- case IW_MODE_INFRA:
- networkType = Ndis802_11Infrastructure;
- break;
- default:
- ret = -EINVAL;
- goto exit;
- }
- if (!rtw_set_802_11_infrastructure_mode(padapter, networkType)) {
- ret = -EPERM;
- goto exit;
- }
- rtw_setopmode_cmd(padapter, networkType);
-exit:
-
- return ret;
-}
-
-static int rtw_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
- wrqu->mode = IW_MODE_INFRA;
- else if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) ||
- (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)))
- wrqu->mode = IW_MODE_ADHOC;
- else if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
- wrqu->mode = IW_MODE_MASTER;
- else
- wrqu->mode = IW_MODE_AUTO;
-
-
-
- return 0;
-}
-
-static int rtw_wx_set_pmkid(struct net_device *dev,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- u8 j, blInserted = false;
- int ret = false;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct iw_pmksa *pPMK = (struct iw_pmksa *)extra;
- u8 strZeroMacAddress[ETH_ALEN] = {0x00};
- u8 strIssueBssid[ETH_ALEN] = {0x00};
-
- memcpy(strIssueBssid, pPMK->bssid.sa_data, ETH_ALEN);
- if (pPMK->cmd == IW_PMKSA_ADD) {
- if (!memcmp(strIssueBssid, strZeroMacAddress, ETH_ALEN))
- return ret;
- else
- ret = true;
- blInserted = false;
-
- /* overwrite PMKID */
- for (j = 0; j < NUM_PMKID_CACHE; j++) {
- if (!memcmp(psecuritypriv->PMKIDList[j].Bssid, strIssueBssid, ETH_ALEN)) {
- /* BSSID is matched, the same AP => rewrite with new PMKID. */
- memcpy(psecuritypriv->PMKIDList[j].PMKID, pPMK->pmkid, IW_PMKID_LEN);
- psecuritypriv->PMKIDList[j].bUsed = true;
- psecuritypriv->PMKIDIndex = j + 1;
- blInserted = true;
- break;
- }
- }
-
- if (!blInserted) {
- /* Find a new entry */
- memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].Bssid, strIssueBssid, ETH_ALEN);
- memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].PMKID, pPMK->pmkid, IW_PMKID_LEN);
-
- psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].bUsed = true;
- psecuritypriv->PMKIDIndex++;
- if (psecuritypriv->PMKIDIndex == 16)
- psecuritypriv->PMKIDIndex = 0;
- }
- } else if (pPMK->cmd == IW_PMKSA_REMOVE) {
- ret = true;
- for (j = 0; j < NUM_PMKID_CACHE; j++) {
- if (!memcmp(psecuritypriv->PMKIDList[j].Bssid, strIssueBssid, ETH_ALEN)) {
- /* BSSID is matched, the same AP => Remove this PMKID information and reset it. */
- memset(psecuritypriv->PMKIDList[j].Bssid, 0x00, ETH_ALEN);
- psecuritypriv->PMKIDList[j].bUsed = false;
- break;
- }
- }
- } else if (pPMK->cmd == IW_PMKSA_FLUSH) {
- memset(&psecuritypriv->PMKIDList[0], 0x00, sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
- psecuritypriv->PMKIDIndex = 0;
- ret = true;
- }
- return ret;
-}
-
-static int rtw_wx_get_sens(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- wrqu->sens.value = 0;
- wrqu->sens.fixed = 0; /* no auto select */
- wrqu->sens.disabled = 1;
- return 0;
-}
-
-static int rtw_wx_get_range(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_range *range = (struct iw_range *)extra;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- u16 val;
- int i;
-
- wrqu->data.length = sizeof(*range);
- memset(range, 0, sizeof(*range));
-
- /* Let's try to keep this struct in the same order as in
- * linux/include/wireless.h
- */
-
- /* TODO: See what values we can set, and remove the ones we can't
- * set, or fill them with some default data.
- */
-
- /* ~5 Mb/s real (802.11b) */
- range->throughput = 5 * 1000 * 1000;
-
- /* signal level threshold range */
-
- /* percent values between 0 and 100. */
- range->max_qual.qual = 100;
- range->max_qual.level = 100;
- range->max_qual.noise = 100;
- range->max_qual.updated = 7; /* Updated all three */
-
- range->avg_qual.qual = 92; /* > 8% missed beacons is 'bad' */
-	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
- range->avg_qual.level = 178; /* -78 dBm */
- range->avg_qual.noise = 0;
- range->avg_qual.updated = 7; /* Updated all three */
-
- range->num_bitrates = RATE_COUNT;
-
- for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++)
- range->bitrate[i] = rtw_rates[i];
-
- range->min_frag = MIN_FRAG_THRESHOLD;
- range->max_frag = MAX_FRAG_THRESHOLD;
-
- range->pm_capa = 0;
-
- range->we_version_compiled = WIRELESS_EXT;
- range->we_version_source = 16;
-
- for (i = 0, val = 0; i < MAX_CHANNEL_NUM; i++) {
- /* Include only legal frequencies for some countries */
- if (pmlmeext->channel_set[i].ChannelNum != 0) {
- range->freq[val].i = pmlmeext->channel_set[i].ChannelNum;
- range->freq[val].m = rtw_ch2freq(pmlmeext->channel_set[i].ChannelNum) * 100000;
- range->freq[val].e = 1;
- val++;
- }
-
- if (val == IW_MAX_FREQUENCIES)
- break;
- }
-
- range->num_channels = val;
- range->num_frequency = val;
-
-/* The following code will provide the security capability to the network manager. */
-/* If the driver doesn't provide this capability to the network manager, */
-/* the WPA/WPA2 routers can't be chosen in the network manager. */
-
-/*
-#define IW_SCAN_CAPA_NONE 0x00
-#define IW_SCAN_CAPA_ESSID 0x01
-#define IW_SCAN_CAPA_BSSID 0x02
-#define IW_SCAN_CAPA_CHANNEL 0x04
-#define IW_SCAN_CAPA_MODE 0x08
-#define IW_SCAN_CAPA_RATE 0x10
-#define IW_SCAN_CAPA_TYPE 0x20
-#define IW_SCAN_CAPA_TIME 0x40
-*/
-
- range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
- IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
-
- range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE |
- IW_SCAN_CAPA_BSSID | IW_SCAN_CAPA_CHANNEL |
- IW_SCAN_CAPA_MODE | IW_SCAN_CAPA_RATE;
-
-
- return 0;
-}
-
-/* set bssid flow */
-/* s1. rtw_set_802_11_infrastructure_mode() */
-/* s2. rtw_set_802_11_authentication_mode() */
-/* s3. set_802_11_encryption_mode() */
-/* s4. rtw_set_802_11_bssid() */
-static int rtw_wx_set_wap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *awrq,
- char *extra)
-{
- uint ret = 0;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct sockaddr *temp = (struct sockaddr *)awrq;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct list_head *phead;
- u8 *dst_bssid, *src_bssid;
- struct __queue *queue = &pmlmepriv->scanned_queue;
- struct wlan_network *pnetwork = NULL;
- enum ndis_802_11_auth_mode authmode;
-
- ret = rtw_pwr_wakeup(padapter);
- if (ret)
- goto exit;
-
- if (!padapter->bup) {
- ret = -1;
- goto exit;
- }
-
- if (temp->sa_family != ARPHRD_ETHER) {
- ret = -EINVAL;
- goto exit;
- }
-
- authmode = padapter->securitypriv.ndisauthtype;
- spin_lock_bh(&queue->lock);
- phead = get_list_head(queue);
- pmlmepriv->pscanned = phead->next;
-
- while (phead != pmlmepriv->pscanned) {
-
- pnetwork = container_of(pmlmepriv->pscanned, struct wlan_network, list);
-
- pmlmepriv->pscanned = pmlmepriv->pscanned->next;
-
- dst_bssid = pnetwork->network.MacAddress;
-
- src_bssid = temp->sa_data;
-
- if ((!memcmp(dst_bssid, src_bssid, ETH_ALEN))) {
- if (!rtw_set_802_11_infrastructure_mode(padapter, pnetwork->network.InfrastructureMode)) {
- ret = -1;
- spin_unlock_bh(&queue->lock);
- goto exit;
- }
-
- break;
- }
- }
- spin_unlock_bh(&queue->lock);
-
- rtw_set_802_11_authentication_mode(padapter, authmode);
- /* set_802_11_encryption_mode(padapter, padapter->securitypriv.ndisencryptstatus); */
- if (!rtw_set_802_11_bssid(padapter, temp->sa_data)) {
- ret = -1;
- goto exit;
- }
-
-exit:
-
-
-
- return ret;
-}
-
-static int rtw_wx_get_wap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
-
- wrqu->ap_addr.sa_family = ARPHRD_ETHER;
-
- memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
-
- if (check_fwstate(pmlmepriv, _FW_LINKED) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ||
- check_fwstate(pmlmepriv, WIFI_AP_STATE))
- memcpy(wrqu->ap_addr.sa_data, pcur_bss->MacAddress, ETH_ALEN);
- else
- memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
-
-
-
- return 0;
-}
-
-static int rtw_wx_set_mlme(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct iw_mlme *mlme = (struct iw_mlme *)extra;
-
- if (!mlme)
- return -1;
-
- switch (mlme->cmd) {
- case IW_MLME_DEAUTH:
- rtw_set_802_11_disassociate(padapter);
- break;
- case IW_MLME_DISASSOC:
- rtw_set_802_11_disassociate(padapter);
- break;
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra)
-{
- u8 _status = false;
- int ret = 0;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT];
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- ret = rtw_pwr_wakeup(padapter);
- if (ret)
- goto exit;
-
- if (padapter->bDriverStopped) {
- ret = -1;
- goto exit;
- }
-
- if (!padapter->bup) {
- ret = -1;
- goto exit;
- }
-
- if (!padapter->hw_init_completed) {
- ret = -1;
- goto exit;
- }
-
-	/* When traffic is busy, the driver does not start a site survey and returns success instead. */
-	/* wpa_supplicant will not issue the SIOCSIWSCAN cmd again after a scan timeout. */
-	/* modified by thomas 2011-02-22. */
- if (pmlmepriv->LinkDetectInfo.bBusyTraffic) {
- indicate_wx_scan_complete_event(padapter);
- goto exit;
- }
-
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING)) {
- indicate_wx_scan_complete_event(padapter);
- goto exit;
- }
-
-/* For the DMP WiFi Display project, the driver won't scan because */
-/* the pmlmepriv->scan_interval is always equal to 3. */
-/* So wpa_supplicant won't find the WPS SoftAP. */
-
- if (pwdinfo->p2p_state != P2P_STATE_NONE) {
- rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
- rtw_p2p_set_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH);
- rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_FULL);
- rtw_free_network_queue(padapter, true);
- }
-
- memset(ssid, 0, sizeof(struct ndis_802_11_ssid) * RTW_SSID_SCAN_AMOUNT);
-
- if (wrqu->data.length == sizeof(struct iw_scan_req)) {
- struct iw_scan_req *req = (struct iw_scan_req *)extra;
-
- if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
- int len = min((int)req->essid_len, IW_ESSID_MAX_SIZE);
-
- memcpy(ssid[0].Ssid, req->essid, len);
- ssid[0].SsidLength = len;
-
- spin_lock_bh(&pmlmepriv->lock);
-
- _status = rtw_sitesurvey_cmd(padapter, ssid, 1);
-
- spin_unlock_bh(&pmlmepriv->lock);
- }
- } else {
- if (wrqu->data.length >= WEXT_CSCAN_HEADER_SIZE &&
- !memcmp(extra, WEXT_CSCAN_HEADER, WEXT_CSCAN_HEADER_SIZE)) {
- int len = wrqu->data.length - WEXT_CSCAN_HEADER_SIZE;
- char *pos = extra + WEXT_CSCAN_HEADER_SIZE;
- char section;
- char sec_len;
- int ssid_index = 0;
-
- while (len >= 1) {
- section = *(pos++);
- len -= 1;
-
- switch (section) {
- case WEXT_CSCAN_SSID_SECTION:
- if (len < 1) {
- len = 0;
- break;
- }
- sec_len = *(pos++); len -= 1;
- if (sec_len > 0 &&
- sec_len <= len &&
- sec_len <= 32) {
- ssid[ssid_index].SsidLength = sec_len;
- memcpy(ssid[ssid_index].Ssid, pos, sec_len);
- ssid_index++;
- }
- pos += sec_len;
- len -= sec_len;
- break;
- case WEXT_CSCAN_TYPE_SECTION:
- case WEXT_CSCAN_CHANNEL_SECTION:
- pos += 1;
- len -= 1;
- break;
- case WEXT_CSCAN_PASV_DWELL_SECTION:
- case WEXT_CSCAN_HOME_DWELL_SECTION:
- case WEXT_CSCAN_ACTV_DWELL_SECTION:
- pos += 2;
- len -= 2;
- break;
- default:
- len = 0; /* stop parsing */
- }
- }
-
-			/* There are still some scan parameters to parse; we only handle these for now... */
- _status = rtw_set_802_11_bssid_list_scan(padapter, ssid, RTW_SSID_SCAN_AMOUNT);
- } else {
- _status = rtw_set_802_11_bssid_list_scan(padapter, NULL, 0);
- }
- }
-
- if (!_status)
- ret = -1;
-
-exit:
-
- return ret;
-}
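
For reference, a minimal sketch of a combo-scan request in the layout the parser above expects: each section starts with a one-byte tag; an SSID section is followed by a length byte and the SSID bytes, the type/channel sections carry one byte and the dwell sections two, which the loop simply skips. The helper name build_cscan_request, the buffer handling and the SSID "example" are illustrative assumptions; only the WEXT_CSCAN_* macros already used above are relied on.

	/* Sketch only: build a one-SSID combo-scan request as the loop above
	 * expects it; the returned length becomes wrqu->data.length.
	 */
	static size_t build_cscan_request(char *buf)
	{
		char *p = buf;

		memcpy(p, WEXT_CSCAN_HEADER, WEXT_CSCAN_HEADER_SIZE);
		p += WEXT_CSCAN_HEADER_SIZE;
		*p++ = WEXT_CSCAN_SSID_SECTION;	/* one-byte section tag */
		*p++ = 7;			/* SSID length */
		memcpy(p, "example", 7);	/* SSID bytes, no terminator */
		p += 7;
		return p - buf;
	}
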
-
-static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra)
-{
- struct list_head *plist, *phead;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct __queue *queue = &pmlmepriv->scanned_queue;
- struct wlan_network *pnetwork = NULL;
- char *ev = extra;
- char *stop = ev + wrqu->data.length;
- u32 ret = 0;
- u32 cnt = 0;
- u32 wait_for_surveydone;
- int wait_status;
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
- /* P2P is enabled */
- wait_for_surveydone = 200;
- } else {
- /* P2P is disabled */
- wait_for_surveydone = 100;
- }
-
- wait_status = _FW_UNDER_SURVEY | _FW_UNDER_LINKING;
-
- while (check_fwstate(pmlmepriv, wait_status)) {
- msleep(30);
- cnt++;
- if (cnt > wait_for_surveydone)
- break;
- }
-
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
-
- phead = get_list_head(queue);
- plist = phead->next;
-
- while (phead != plist) {
- if ((stop - ev) < SCAN_ITEM_SIZE) {
- ret = -E2BIG;
- break;
- }
-
- pnetwork = container_of(plist, struct wlan_network, list);
-
- /* report network only if the current channel set contains the channel to which this network belongs */
- if (rtw_ch_set_search_ch(padapter->mlmeextpriv.channel_set, pnetwork->network.Configuration.DSConfig) >= 0)
- ev = translate_scan(padapter, a, pnetwork, ev, stop);
-
- plist = plist->next;
- }
-
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
-
- wrqu->data.length = ev - extra;
- wrqu->data.flags = 0;
-
- return ret;
-}
-
-/* set ssid flow */
-/* s1. rtw_set_802_11_infrastructure_mode() */
-/* s2. rtw_set_802_11_authentication_mode() */
-/* s3. set_802_11_encryption_mode() */
-/* s4. rtw_set_802_11_ssid() */
-static int rtw_wx_set_essid(struct net_device *dev,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct __queue *queue = &pmlmepriv->scanned_queue;
- struct list_head *phead;
- struct wlan_network *pnetwork = NULL;
- enum ndis_802_11_auth_mode authmode;
- struct ndis_802_11_ssid ndis_ssid;
- u8 *dst_ssid, *src_ssid;
-
- uint ret = 0, len;
-
- ret = rtw_pwr_wakeup(padapter);
- if (ret)
- goto exit;
-
- if (!padapter->bup) {
- ret = -1;
- goto exit;
- }
-
- if (wrqu->essid.length > IW_ESSID_MAX_SIZE) {
- ret = -E2BIG;
- goto exit;
- }
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- ret = -1;
- goto exit;
- }
-
- authmode = padapter->securitypriv.ndisauthtype;
- if (wrqu->essid.flags && wrqu->essid.length) {
- len = (wrqu->essid.length < IW_ESSID_MAX_SIZE) ? wrqu->essid.length : IW_ESSID_MAX_SIZE;
-
- memset(&ndis_ssid, 0, sizeof(struct ndis_802_11_ssid));
- ndis_ssid.SsidLength = len;
- memcpy(ndis_ssid.Ssid, extra, len);
- src_ssid = ndis_ssid.Ssid;
-
- spin_lock_bh(&queue->lock);
- phead = get_list_head(queue);
- pmlmepriv->pscanned = phead->next;
-
- while (phead != pmlmepriv->pscanned) {
- pnetwork = container_of(pmlmepriv->pscanned, struct wlan_network, list);
-
- pmlmepriv->pscanned = pmlmepriv->pscanned->next;
-
- dst_ssid = pnetwork->network.Ssid.Ssid;
-
- if ((!memcmp(dst_ssid, src_ssid, ndis_ssid.SsidLength)) &&
- (pnetwork->network.Ssid.SsidLength == ndis_ssid.SsidLength)) {
-
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
- if (pnetwork->network.InfrastructureMode != pmlmepriv->cur_network.network.InfrastructureMode)
- continue;
- }
-
- if (!rtw_set_802_11_infrastructure_mode(padapter, pnetwork->network.InfrastructureMode)) {
- ret = -1;
- spin_unlock_bh(&queue->lock);
- goto exit;
- }
-
- break;
- }
- }
- spin_unlock_bh(&queue->lock);
- rtw_set_802_11_authentication_mode(padapter, authmode);
- if (!rtw_set_802_11_ssid(padapter, &ndis_ssid)) {
- ret = -1;
- goto exit;
- }
- }
-
-exit:
- return ret;
-}
-
-static int rtw_wx_get_essid(struct net_device *dev,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra)
-{
- u32 len;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
-
- if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
- (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) {
- len = pcur_bss->Ssid.SsidLength;
- memcpy(extra, pcur_bss->Ssid.Ssid, len);
- } else {
- len = 0;
- *extra = 0;
- }
- wrqu->essid.length = len;
- wrqu->essid.flags = 1;
-
- return 0;
-}
-
-static int rtw_wx_set_rate(struct net_device *dev,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra)
-{
- int i;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- u8 datarates[NumRates];
- u32 target_rate = wrqu->bitrate.value;
- u32 fixed = wrqu->bitrate.fixed;
- u32 ratevalue = 0;
- u8 mpdatarate[NumRates] = {11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0xff};
-
- if (target_rate == -1) {
- ratevalue = 11;
- goto set_rate;
- }
- target_rate = target_rate / 100000;
-
- switch (target_rate) {
- case 10:
- ratevalue = 0;
- break;
- case 20:
- ratevalue = 1;
- break;
- case 55:
- ratevalue = 2;
- break;
- case 60:
- ratevalue = 3;
- break;
- case 90:
- ratevalue = 4;
- break;
- case 110:
- ratevalue = 5;
- break;
- case 120:
- ratevalue = 6;
- break;
- case 180:
- ratevalue = 7;
- break;
- case 240:
- ratevalue = 8;
- break;
- case 360:
- ratevalue = 9;
- break;
- case 480:
- ratevalue = 10;
- break;
- case 540:
- ratevalue = 11;
- break;
- default:
- ratevalue = 11;
- break;
- }
-
-set_rate:
-
- for (i = 0; i < NumRates; i++) {
- if (ratevalue == mpdatarate[i]) {
- datarates[i] = mpdatarate[i];
- if (fixed == 0)
- break;
- } else {
- datarates[i] = 0xff;
- }
- }
-
- return rtw_setdatarate_cmd(padapter, datarates);
-}
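
As a worked example of the mapping above (WEXT bitrate values are in bit/s, and the chosen rate is arbitrary): requesting a fixed 5.5 Mb/s rate arrives as wrqu->bitrate.value == 5500000, so target_rate = 5500000 / 100000 = 55 and the switch selects ratevalue = 2, which is then matched against the mpdatarate[] table before rtw_setdatarate_cmd() is issued.
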
-
-static int rtw_wx_get_rate(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- u16 max_rate = 0;
-
- max_rate = rtw_get_cur_max_rate((struct adapter *)rtw_netdev_priv(dev));
-
- if (max_rate == 0)
- return -EPERM;
-
- wrqu->bitrate.fixed = 0; /* no auto select */
- wrqu->bitrate.value = max_rate * 100000;
-
- return 0;
-}
-
-static int rtw_wx_set_rts(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
-
-
- if (wrqu->rts.disabled) {
- padapter->registrypriv.rts_thresh = 2347;
- } else {
- if (wrqu->rts.value < 0 ||
- wrqu->rts.value > 2347)
- return -EINVAL;
-
- padapter->registrypriv.rts_thresh = wrqu->rts.value;
- }
-
- return 0;
-}
-
-static int rtw_wx_get_rts(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
-
-
- wrqu->rts.value = padapter->registrypriv.rts_thresh;
- wrqu->rts.fixed = 0; /* no auto select */
- /* wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); */
-
-
-
- return 0;
-}
-
-static int rtw_wx_set_frag(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
-
-
- if (wrqu->frag.disabled) {
- padapter->xmitpriv.frag_len = MAX_FRAG_THRESHOLD;
- } else {
- if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
- wrqu->frag.value > MAX_FRAG_THRESHOLD)
- return -EINVAL;
-
- padapter->xmitpriv.frag_len = wrqu->frag.value & ~0x1;
- }
-
- return 0;
-}
-
-static int rtw_wx_get_frag(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
-
-
- wrqu->frag.value = padapter->xmitpriv.frag_len;
- wrqu->frag.fixed = 0; /* no auto select */
-
-
-
- return 0;
-}
-
-static int rtw_wx_get_retry(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- wrqu->retry.value = 7;
- wrqu->retry.fixed = 0; /* no auto select */
- wrqu->retry.disabled = 1;
-
- return 0;
-}
-
-static int rtw_wx_set_enc(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *keybuf)
-{
- u32 key, ret = 0;
- u32 keyindex_provided;
- struct ndis_802_11_wep wep;
- enum ndis_802_11_auth_mode authmode;
-
- struct iw_point *erq = &wrqu->encoding;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-
- memset(&wep, 0, sizeof(struct ndis_802_11_wep));
-
- key = erq->flags & IW_ENCODE_INDEX;
-
-
-
- if (erq->flags & IW_ENCODE_DISABLED) {
- padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
- padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
- padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
- padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
- authmode = Ndis802_11AuthModeOpen;
- padapter->securitypriv.ndisauthtype = authmode;
-
- goto exit;
- }
-
- if (key) {
- if (key > WEP_KEYS)
- return -EINVAL;
- key--;
- keyindex_provided = 1;
- } else {
- keyindex_provided = 0;
- key = padapter->securitypriv.dot11PrivacyKeyIndex;
- }
-
- /* set authentication mode */
- if (erq->flags & IW_ENCODE_OPEN) {
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;/* Ndis802_11EncryptionDisabled; */
- padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
- padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
- padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
- authmode = Ndis802_11AuthModeOpen;
- padapter->securitypriv.ndisauthtype = authmode;
- } else if (erq->flags & IW_ENCODE_RESTRICTED) {
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Shared;
- padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
- padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
- authmode = Ndis802_11AuthModeShared;
- padapter->securitypriv.ndisauthtype = authmode;
- } else {
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;/* Ndis802_11EncryptionDisabled; */
- padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
- padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
- padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
- authmode = Ndis802_11AuthModeOpen;
- padapter->securitypriv.ndisauthtype = authmode;
- }
-
- wep.KeyIndex = key;
- if (erq->length > 0) {
- wep.KeyLength = erq->length <= 5 ? 5 : 13;
-
- wep.Length = wep.KeyLength + offsetof(struct ndis_802_11_wep, KeyMaterial);
- } else {
- wep.KeyLength = 0;
-
- if (keyindex_provided == 1) {
-			/* set key_id only; no KeyMaterial given (erq->length == 0). */
- padapter->securitypriv.dot11PrivacyKeyIndex = key;
-
- switch (padapter->securitypriv.dot11DefKeylen[key]) {
- case 5:
- padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
- break;
- case 13:
- padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
- break;
- default:
- padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
- break;
- }
-
- goto exit;
- }
- }
-
- wep.KeyIndex |= 0x80000000;
-
- memcpy(wep.KeyMaterial, keybuf, wep.KeyLength);
-
- if (!rtw_set_802_11_add_wep(padapter, &wep)) {
- if (rf_on == pwrpriv->rf_pwrstate)
- ret = -EOPNOTSUPP;
- goto exit;
- }
-
-exit:
-
-
-
- return ret;
-}
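
As a worked example (the key index and key size are illustrative): configuring WEP key index 2 with a 5-byte key in shared-key mode reaches this handler with IW_ENCODE_RESTRICTED set in erq->flags, index 2 in the IW_ENCODE_INDEX bits and erq->length == 5. The code then decrements the index to 1, selects dot11AuthAlgrthm_Shared and _WEP40_, builds an ndis_802_11_wep with KeyLength 5 and KeyIndex 1 | 0x80000000, and hands it to rtw_set_802_11_add_wep().
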
-
-static int rtw_wx_get_enc(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *keybuf)
-{
- uint key;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct iw_point *erq = &wrqu->encoding;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
-
-
- if (check_fwstate(pmlmepriv, _FW_LINKED) != true) {
- if (!check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
- erq->length = 0;
- erq->flags |= IW_ENCODE_DISABLED;
- return 0;
- }
- }
-
- key = erq->flags & IW_ENCODE_INDEX;
-
- if (key) {
- if (key > WEP_KEYS)
- return -EINVAL;
- key--;
- } else {
- key = padapter->securitypriv.dot11PrivacyKeyIndex;
- }
-
- erq->flags = key + 1;
-
- switch (padapter->securitypriv.ndisencryptstatus) {
- case Ndis802_11EncryptionNotSupported:
- case Ndis802_11EncryptionDisabled:
- erq->length = 0;
- erq->flags |= IW_ENCODE_DISABLED;
- break;
- case Ndis802_11Encryption1Enabled:
- erq->length = padapter->securitypriv.dot11DefKeylen[key];
- if (erq->length) {
- memcpy(keybuf, padapter->securitypriv.dot11DefKey[key].skey, padapter->securitypriv.dot11DefKeylen[key]);
-
- erq->flags |= IW_ENCODE_ENABLED;
-
- if (padapter->securitypriv.ndisauthtype == Ndis802_11AuthModeOpen)
- erq->flags |= IW_ENCODE_OPEN;
- else if (padapter->securitypriv.ndisauthtype == Ndis802_11AuthModeShared)
- erq->flags |= IW_ENCODE_RESTRICTED;
- } else {
- erq->length = 0;
- erq->flags |= IW_ENCODE_DISABLED;
- }
- break;
- case Ndis802_11Encryption2Enabled:
- case Ndis802_11Encryption3Enabled:
- erq->length = 16;
- erq->flags |= (IW_ENCODE_ENABLED | IW_ENCODE_OPEN | IW_ENCODE_NOKEY);
- break;
- default:
- erq->length = 0;
- erq->flags |= IW_ENCODE_DISABLED;
- break;
- }
-
-
- return 0;
-}
-
-static int rtw_wx_get_power(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- wrqu->power.value = 0;
- wrqu->power.fixed = 0; /* no auto select */
- wrqu->power.disabled = 1;
-
- return 0;
-}
-
-static int rtw_wx_set_gen_ie(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
- ret = rtw_set_wpa_ie(padapter, extra, wrqu->data.length);
- return ret;
-}
-
-static int rtw_wx_set_auth(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct iw_param *param = (struct iw_param *)&wrqu->param;
- int ret = 0;
-
- switch (param->flags & IW_AUTH_INDEX) {
- case IW_AUTH_WPA_VERSION:
- break;
- case IW_AUTH_CIPHER_PAIRWISE:
-
- break;
- case IW_AUTH_CIPHER_GROUP:
-
- break;
- case IW_AUTH_KEY_MGMT:
- /*
- * ??? does not use these parameters
- */
- break;
- case IW_AUTH_TKIP_COUNTERMEASURES:
- if (param->value) {
- /* wpa_supplicant is enabling the tkip countermeasure. */
- padapter->securitypriv.btkip_countermeasure = true;
- } else {
- /* wpa_supplicant is disabling the tkip countermeasure. */
- padapter->securitypriv.btkip_countermeasure = false;
- }
- break;
- case IW_AUTH_DROP_UNENCRYPTED:
-		/* HACK:
-		 *
-		 * wpa_supplicant calls set_wpa_enabled when the driver
-		 * is loaded and unloaded, regardless of whether WPA is
-		 * being used. No other calls are made that could be used
-		 * to determine whether encryption will be used before
-		 * association is expected. If encryption is not being
-		 * used, drop_unencrypted is set to false, else true -- we
-		 * can use this to determine whether the CAP_PRIVACY_ON
-		 * bit should be set.
-		 */
-
- if (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption1Enabled)
-			break;/* this is either the initial value or WEP is in use (ndisencryptstatus == Ndis802_11Encryption1Enabled), */
-			/* so there is no need to reset it */
-
- if (param->value) {
- padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
- padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
- padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
- padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
- padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
- }
-
- break;
- case IW_AUTH_80211_AUTH_ALG:
- /*
- * It's the starting point of a link layer connection using wpa_supplicant
- */
- if (check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
- LeaveAllPowerSaveMode(padapter);
- rtw_disassoc_cmd(padapter, 500, false);
- rtw_indicate_disconnect(padapter);
- rtw_free_assoc_resources(padapter, 1);
- }
- ret = wpa_set_auth_algs(dev, (u32)param->value);
- break;
- case IW_AUTH_WPA_ENABLED:
- break;
- case IW_AUTH_RX_UNENCRYPTED_EAPOL:
- break;
- case IW_AUTH_PRIVACY_INVOKED:
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return ret;
-}
-
-static int rtw_wx_set_enc_ext(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- char *alg_name;
- u32 param_len;
- struct ieee_param *param = NULL;
- struct iw_point *pencoding = &wrqu->encoding;
- struct iw_encode_ext *pext = (struct iw_encode_ext *)extra;
- int ret = -1;
-
- param_len = sizeof(struct ieee_param) + pext->key_len;
- param = kzalloc(param_len, GFP_KERNEL);
- if (!param)
- return -ENOMEM;
-
- param->cmd = IEEE_CMD_SET_ENCRYPTION;
- memset(param->sta_addr, 0xff, ETH_ALEN);
-
- switch (pext->alg) {
- case IW_ENCODE_ALG_NONE:
- /* todo: remove key */
- /* remove = 1; */
- alg_name = "none";
- break;
- case IW_ENCODE_ALG_WEP:
- alg_name = "WEP";
- break;
- case IW_ENCODE_ALG_TKIP:
- alg_name = "TKIP";
- break;
- case IW_ENCODE_ALG_CCMP:
- alg_name = "CCMP";
- break;
- default:
- goto out;
- }
-
- strscpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN);
-
- if (pext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
- param->u.crypt.set_tx = 1;
-
-	/* cliW: WEP does not have a group key,
-	 * so don't check the GROUP key flag here.
-	 */
- if ((pext->alg != IW_ENCODE_ALG_WEP) &&
- (pext->ext_flags & IW_ENCODE_EXT_GROUP_KEY))
- param->u.crypt.set_tx = 0;
-
- param->u.crypt.idx = (pencoding->flags & 0x00FF) - 1;
-
- if (pext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID)
- memcpy(param->u.crypt.seq, pext->rx_seq, 8);
-
- if (pext->key_len) {
- param->u.crypt.key_len = pext->key_len;
- memcpy(param->u.crypt.key, pext + 1, pext->key_len);
- }
-
- ret = wpa_set_encryption(dev, param, param_len);
-
-out:
- kfree(param);
- return ret;
-}
-
-static int rtw_wx_get_nick(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- if (extra) {
- wrqu->data.length = 14;
- wrqu->data.flags = 1;
- memcpy(extra, "<WIFI@REALTEK>", 14);
- }
-
- /* dump debug info here */
- return 0;
-}
-
-static int rtw_wx_read_rf(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- u32 path, addr, data32;
-
- path = *(u32 *)extra;
- if (path != RF_PATH_A)
- return -EINVAL;
-
- addr = *((u32 *)extra + 1);
- data32 = rtl8188e_PHY_QueryRFReg(padapter, addr, 0xFFFFF);
- /*
- * IMPORTANT!!
-	 * Only when the wireless private ioctl is at an odd order
-	 * is "extra" copied back to user space.
- */
- sprintf(extra, "0x%05x", data32);
-
- return 0;
-}
-
-static int rtw_wx_write_rf(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- u32 path, addr, data32;
-
- path = *(u32 *)extra;
- if (path != RF_PATH_A)
- return -EINVAL;
-
- addr = *((u32 *)extra + 1);
- data32 = *((u32 *)extra + 2);
- rtl8188e_PHY_SetRFReg(padapter, addr, 0xFFFFF, data32);
-
- return 0;
-}
-
-static int rtw_wx_set_channel_plan(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- u8 channel_plan_req = (u8)(*((int *)wrqu));
-
- if (rtw_set_chplan_cmd(padapter, channel_plan_req) != _SUCCESS)
- return -EPERM;
-
- return 0;
-}
-
-static int rtw_get_ap_info(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- u32 cnt = 0, wpa_ielen;
- struct list_head *plist, *phead;
- unsigned char *pbuf;
- u8 bssid[ETH_ALEN];
- char data[32];
- struct wlan_network *pnetwork = NULL;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct __queue *queue = &pmlmepriv->scanned_queue;
- struct iw_point *pdata = &wrqu->data;
-
- if (padapter->bDriverStopped || !pdata) {
- ret = -EINVAL;
- goto exit;
- }
-
- while ((check_fwstate(pmlmepriv, (_FW_UNDER_SURVEY | _FW_UNDER_LINKING)))) {
- msleep(30);
- cnt++;
- if (cnt > 100)
- break;
- }
- pdata->flags = 0;
- if (pdata->length >= 32) {
- if (copy_from_user(data, pdata->pointer, 32)) {
- ret = -EINVAL;
- goto exit;
- }
- } else {
- ret = -EINVAL;
- goto exit;
- }
-
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
-
- phead = get_list_head(queue);
- plist = phead->next;
-
- while (phead != plist) {
- pnetwork = container_of(plist, struct wlan_network, list);
-
- if (!mac_pton(data, bssid)) {
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- return -EINVAL;
- }
-
- if (!memcmp(bssid, pnetwork->network.MacAddress, ETH_ALEN)) {
- /* BSSID match, then check if supporting wpa/wpa2 */
- pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength - 12);
- if (pbuf && (wpa_ielen > 0)) {
- pdata->flags = 1;
- break;
- }
-
- pbuf = rtw_get_wpa2_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength - 12);
- if (pbuf && (wpa_ielen > 0)) {
- pdata->flags = 2;
- break;
- }
- }
-
- plist = plist->next;
- }
-
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
-
- if (pdata->length >= 34) {
- if (copy_to_user(pdata->pointer + 32, (u8 *)&pdata->flags, 1)) {
- ret = -EINVAL;
- goto exit;
- }
- }
-
-exit:
-
- return ret;
-}
-
-static int rtw_set_pid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- struct adapter *padapter = rtw_netdev_priv(dev);
- int *pdata = (int *)wrqu;
- int selector;
-
- if (padapter->bDriverStopped || !pdata) {
- ret = -EINVAL;
- goto exit;
- }
-
- selector = *pdata;
- if (selector < 3 && selector >= 0) {
- padapter->pid[selector] = *(pdata + 1);
- ui_pid[selector] = *(pdata + 1);
- }
-exit:
- return ret;
-}
-
-static int rtw_wps_start(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct iw_point *pdata = &wrqu->data;
- u32 u32wps_start = 0;
-
- if (!pdata)
- return -EINVAL;
- ret = copy_from_user((void *)&u32wps_start, pdata->pointer, 4);
- if (ret) {
- ret = -EINVAL;
- goto exit;
- }
-
- if (padapter->bDriverStopped) {
- ret = -EINVAL;
- goto exit;
- }
-
- if (u32wps_start == 0)
- u32wps_start = *extra;
-
- if (u32wps_start == 1) /* WPS Start */
- rtw_led_control(padapter, LED_CTL_START_WPS);
- else if (u32wps_start == 2) /* WPS Stop because of wps success */
- rtw_led_control(padapter, LED_CTL_STOP_WPS);
- else if (u32wps_start == 3) /* WPS Stop because of wps fail */
- rtw_led_control(padapter, LED_CTL_STOP_WPS_FAIL);
-
-exit:
- return ret;
-}
-
-static int rtw_wext_p2p_enable(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- enum P2P_ROLE init_role = P2P_ROLE_DISABLE;
-
- if (*extra == '0')
- init_role = P2P_ROLE_DISABLE;
- else if (*extra == '1')
- init_role = P2P_ROLE_DEVICE;
- else if (*extra == '2')
- init_role = P2P_ROLE_CLIENT;
- else if (*extra == '3')
- init_role = P2P_ROLE_GO;
-
- ret = rtw_p2p_enable(padapter, init_role);
- if (ret)
- return ret;
-
- /* set channel/bandwidth */
- if (init_role != P2P_ROLE_DISABLE) {
- u8 channel, ch_offset;
- u16 bwmode;
-
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_LISTEN)) {
- /* Stay at the listen state and wait for discovery. */
- channel = pwdinfo->listen_channel;
- pwdinfo->operating_channel = pwdinfo->listen_channel;
- ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- bwmode = HT_CHANNEL_WIDTH_20;
- } else {
- pwdinfo->operating_channel = pmlmeext->cur_channel;
-
- channel = pwdinfo->operating_channel;
- ch_offset = pmlmeext->cur_ch_offset;
- bwmode = pmlmeext->cur_bwmode;
- }
-
- set_channel_bwmode(padapter, channel, ch_offset, bwmode);
- }
-
- return 0;
-}
-
-static void rtw_p2p_set_go_nego_ssid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- memcpy(pwdinfo->nego_ssid, extra, strlen(extra));
- pwdinfo->nego_ssidlen = strlen(extra);
-}
-
-static int rtw_p2p_set_intent(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- u8 intent = pwdinfo->intent;
-
- switch (wrqu->data.length) {
- case 1:
- intent = extra[0] - '0';
- break;
- case 2:
- intent = str_2char2num(extra[0], extra[1]);
- break;
- }
- if (intent <= 15)
- pwdinfo->intent = intent;
- else
- ret = -1;
- return ret;
-}
-
-static int rtw_p2p_set_listen_ch(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- u8 listen_ch = pwdinfo->listen_channel; /* Listen channel number */
-
- switch (wrqu->data.length) {
- case 1:
- listen_ch = extra[0] - '0';
- break;
- case 2:
- listen_ch = str_2char2num(extra[0], extra[1]);
- break;
- }
-
- if ((listen_ch == 1) || (listen_ch == 6) || (listen_ch == 11)) {
- pwdinfo->listen_channel = listen_ch;
- set_channel_bwmode(padapter, pwdinfo->listen_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
- } else {
- ret = -1;
- }
-
- return ret;
-}
-
-static int rtw_p2p_set_op_ch(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
-/* Commented by Albert 20110524 */
-/* This function sets the operating channel to be used if the driver becomes the group owner */
-
- int ret = 0;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- u8 op_ch = pwdinfo->operating_channel; /* Operating channel number */
-
- switch (wrqu->data.length) {
- case 1:
- op_ch = extra[0] - '0';
- break;
- case 2:
- op_ch = str_2char2num(extra[0], extra[1]);
- break;
- }
-
- if (op_ch > 0)
- pwdinfo->operating_channel = op_ch;
- else
- ret = -1;
-
- return ret;
-}
-
-static int rtw_p2p_profilefound(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- /* Comment by Albert 2010/10/13 */
- /* Input data format: */
- /* Ex: 0 */
- /* Ex: 1XX:XX:XX:XX:XX:XXYYSSID */
-	/* 0 => Flush (clear) the profile record list. */
-	/* 1 => Add a profile to the list */
-	/* XX:XX:XX:XX:XX:XX => peer's MAC Address (ex: 00:E0:4C:00:00:01) */
-	/* YY => SSID Length */
-	/* SSID => SSID of the persistent group */
-
-	/* The upper-layer application passes the SSID to the driver through this rtw_p2p_profilefound function. */
- if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
- if (extra[0] == '0') {
- /* Remove all the profile information of wifidirect_info structure. */
- memset(&pwdinfo->profileinfo[0], 0x00, sizeof(struct profile_info) * P2P_MAX_PERSISTENT_GROUP_NUM);
- pwdinfo->profileindex = 0;
- } else {
- if (pwdinfo->profileindex >= P2P_MAX_PERSISTENT_GROUP_NUM) {
- ret = -1;
- } else {
- int jj, kk;
-
- /* Add this profile information into pwdinfo->profileinfo */
- /* Ex: 1XX:XX:XX:XX:XX:XXYYSSID */
- for (jj = 0, kk = 1; jj < ETH_ALEN; jj++, kk += 3)
- pwdinfo->profileinfo[pwdinfo->profileindex].peermac[jj] = key_2char2num(extra[kk], extra[kk + 1]);
-
- pwdinfo->profileinfo[pwdinfo->profileindex].ssidlen = (extra[18] - '0') * 10 + (extra[19] - '0');
- memcpy(pwdinfo->profileinfo[pwdinfo->profileindex].ssid, &extra[20], pwdinfo->profileinfo[pwdinfo->profileindex].ssidlen);
- pwdinfo->profileindex++;
- }
- }
- }
-
- return ret;
-}
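
A concrete instance of the add format described above (the SSID "DIRECT-AB" is a made-up example; the MAC is the one used in the comment): passing the string "100:E0:4C:00:00:0109DIRECT-AB" adds a profile for peer 00:E0:4C:00:00:01 with a 9-byte SSID, since extra[18] and extra[19] carry the two length digits and the SSID starts at extra[20].
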
-
-static void rtw_p2p_setDN(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- memset(pwdinfo->device_name, 0x00, WPS_MAX_DEVICE_NAME_LEN);
- memcpy(pwdinfo->device_name, extra, wrqu->data.length - 1);
- pwdinfo->device_name_len = wrqu->data.length - 1;
-}
-
-static int rtw_p2p_get_wps_configmethod(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- u8 peerMAC[ETH_ALEN] = {0x00};
- int jj, kk;
- u8 peerMACStr[17] = {0x00};
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct list_head *plist, *phead;
- struct __queue *queue = &pmlmepriv->scanned_queue;
- struct wlan_network *pnetwork = NULL;
- u8 blnMatch = 0;
- u16 attr_content = 0;
- uint attr_contentlen = 0;
-	/* 6 is the length of the string "wpsCM =", 17 is the length of the MAC address; that many bytes have to be cleared at wrqu->data.pointer */
- u8 attr_content_str[6 + 17] = {0x00};
-
- /* Commented by Albert 20110727 */
-	/* The input data is the MAC address whose WPS config method the application wants to know. */
-	/* Once the WPS config method is known, the application can decide which config method to use for provisioning discovery. */
- /* Format: iwpriv wlanx p2p_get_wpsCM 00:E0:4C:00:00:05 */
-
- if (copy_from_user(peerMACStr, wrqu->data.pointer + 6, 17))
- return -EFAULT;
-
- for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
- peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
-
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
-
- phead = get_list_head(queue);
- plist = phead->next;
-
- while (phead != plist) {
- pnetwork = container_of(plist, struct wlan_network, list);
- if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
- u8 *wpsie;
- uint wpsie_len = 0;
- __be16 be_tmp;
-
- /* The mac address is matched. */
- wpsie = rtw_get_wps_ie(&pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &wpsie_len);
- if (wpsie) {
- rtw_get_wps_attr_content(wpsie, wpsie_len, WPS_ATTR_CONF_METHOD, (u8 *)&be_tmp, &attr_contentlen);
- if (attr_contentlen) {
- attr_content = be16_to_cpu(be_tmp);
- sprintf(attr_content_str, "\n\nM =%.4d", attr_content);
- blnMatch = 1;
- }
- }
- break;
- }
- plist = plist->next;
- }
-
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
-
- if (!blnMatch)
- sprintf(attr_content_str, "\n\nM = 0000");
-
- if (copy_to_user(wrqu->data.pointer, attr_content_str, 6 + 17))
- return -EFAULT;
- return 0;
-}
-
-static int rtw_p2p_get_go_device_address(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- u8 peerMAC[ETH_ALEN] = {0x00};
- int jj, kk;
- u8 peerMACStr[17] = {0x00};
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct list_head *plist, *phead;
- struct __queue *queue = &pmlmepriv->scanned_queue;
- struct wlan_network *pnetwork = NULL;
- u8 blnMatch = 0;
- u8 *p2pie;
- uint p2pielen = 0, attr_contentlen = 0;
- u8 attr_content[100] = {0x00};
-
- u8 go_devadd_str[100 + 10] = {0x00};
- /* +10 is for the str "go_devadd =", we have to clear it at wrqu->data.pointer */
-
- /* Commented by Albert 20121209 */
-	/* The input data is the GO's interface address; the application wants to know the corresponding device address. */
- /* Format: iwpriv wlanx p2p_get2 go_devadd = 00:E0:4C:00:00:05 */
-
- if (copy_from_user(peerMACStr, wrqu->data.pointer + 10, 17))
- return -EFAULT;
-
- for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
- peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
-
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
-
- phead = get_list_head(queue);
- plist = phead->next;
-
- while (phead != plist) {
- pnetwork = container_of(plist, struct wlan_network, list);
- if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
- /* Commented by Albert 2011/05/18 */
- /* Match the device address located in the P2P IE */
- /* This is for the case that the P2P device address is not the same as the P2P interface address. */
-
- p2pie = rtw_get_p2p_ie(&pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &p2pielen);
- if (p2pie) {
- while (p2pie) {
- /* The P2P Device ID attribute is included in the Beacon frame. */
- /* The P2P Device Info attribute is included in the probe response frame. */
-
- memset(attr_content, 0x00, 100);
- if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_ID, attr_content, &attr_contentlen)) {
- /* Handle the P2P Device ID attribute of Beacon first */
- blnMatch = 1;
- break;
- } else if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_INFO, attr_content, &attr_contentlen)) {
- /* Handle the P2P Device Info attribute of probe response */
- blnMatch = 1;
- break;
- }
-
- /* Get the next P2P IE */
- p2pie = rtw_get_p2p_ie(p2pie + p2pielen, pnetwork->network.IELength - 12 - (p2pie - &pnetwork->network.IEs[12] + p2pielen), NULL, &p2pielen);
- }
- }
- }
-
- plist = plist->next;
- }
-
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
-
- if (!blnMatch)
- sprintf(go_devadd_str, "\n\ndev_add = NULL");
- else
- sprintf(go_devadd_str, "\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X",
- attr_content[0], attr_content[1], attr_content[2], attr_content[3], attr_content[4], attr_content[5]);
-
- if (copy_to_user(wrqu->data.pointer, go_devadd_str, 10 + 17))
- return -EFAULT;
- return 0;
-}
-
-static int rtw_p2p_get_device_type(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- u8 peerMAC[ETH_ALEN] = {0x00};
- int jj, kk;
- u8 peerMACStr[17] = {0x00};
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct list_head *plist, *phead;
- struct __queue *queue = &pmlmepriv->scanned_queue;
- struct wlan_network *pnetwork = NULL;
- u8 blnMatch = 0;
- u8 dev_type[8] = {0x00};
- uint dev_type_len = 0;
- u8 dev_type_str[17 + 9] = {0x00}; /* +9 is for the str "dev_type =", we have to clear it at wrqu->data.pointer */
-
- /* Commented by Albert 20121209 */
- /* The input data is the MAC address of the peer whose device type the application wants to know. */
- /* This lets the user interface learn the peer's device type. */
- /* Format: iwpriv wlanx p2p_get2 dev_type = 00:E0:4C:00:00:05 */
-
- if (copy_from_user(peerMACStr, wrqu->data.pointer + 9, 17))
- return -EFAULT;
-
- for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
- peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
-
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
-
- phead = get_list_head(queue);
- plist = phead->next;
-
- while (phead != plist) {
- pnetwork = container_of(plist, struct wlan_network, list);
- if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
- u8 *wpsie;
- uint wpsie_len = 0;
-
- /* The mac address is matched. */
-
- wpsie = rtw_get_wps_ie(&pnetwork->network.IEs[12],
- pnetwork->network.IELength - 12,
- NULL, &wpsie_len);
- if (wpsie) {
- rtw_get_wps_attr_content(wpsie, wpsie_len, WPS_ATTR_PRIMARY_DEV_TYPE, dev_type, &dev_type_len);
- if (dev_type_len) {
- u16 type = 0;
- __be16 be_tmp;
-
- memcpy(&be_tmp, dev_type, 2);
- type = be16_to_cpu(be_tmp);
- sprintf(dev_type_str, "\n\nN =%.2d", type);
- blnMatch = 1;
- }
- }
- break;
- }
-
- plist = plist->next;
- }
-
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
-
- if (!blnMatch)
- sprintf(dev_type_str, "\n\nN = 00");
-
- if (copy_to_user(wrqu->data.pointer, dev_type_str, 9 + 17)) {
- return -EFAULT;
- }
-
- return 0;
-}
-
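rtw_p2p_get_device_type() above copies the raw WPS Primary Device Type attribute and interprets only its first two bytes, the category ID, which is carried in big-endian (network) byte order. A small standalone sketch of that byte-order handling (wps_dev_type_category() is an illustrative helper, not a driver function):

    #include <stdio.h>

    /* The first two bytes of the WPS Primary Device Type attribute hold the
     * category ID in big-endian order. */
    static unsigned short wps_dev_type_category(const unsigned char *attr)
    {
            return (unsigned short)((attr[0] << 8) | attr[1]);
    }

    int main(void)
    {
            /* category 0x0001 ("Computer"), then the WFA OUI and a sub-category */
            unsigned char attr[8] = { 0x00, 0x01, 0x00, 0x50, 0xF2, 0x04, 0x00, 0x01 };

            printf("category = %u\n", wps_dev_type_category(attr));   /* prints 1 */
            return 0;
    }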
-static int rtw_p2p_get_device_name(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- u8 peerMAC[ETH_ALEN] = {0x00};
- int jj, kk;
- u8 peerMACStr[17] = {0x00};
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct list_head *plist, *phead;
- struct __queue *queue = &pmlmepriv->scanned_queue;
- struct wlan_network *pnetwork = NULL;
- u8 blnMatch = 0;
- u8 dev_name[WPS_MAX_DEVICE_NAME_LEN] = {0x00};
- uint dev_len = 0;
- u8 dev_name_str[WPS_MAX_DEVICE_NAME_LEN + 5] = {0x00}; /* +5 is for the str "devN =", we have to clear it at wrqu->data.pointer */
-
- /* Commented by Albert 20121225 */
- /* The input data is the MAC address of the peer whose device name the application wants to know. */
- /* This lets the user interface show the peer device's name instead of its SSID. */
- /* Format: iwpriv wlanx p2p_get2 devN = 00:E0:4C:00:00:05 */
-
- if (copy_from_user(peerMACStr, wrqu->data.pointer + 5, 17))
- return -EFAULT;
-
- for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
- peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
-
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
-
- phead = get_list_head(queue);
- plist = phead->next;
-
- while (phead != plist) {
- pnetwork = container_of(plist, struct wlan_network, list);
- if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
- u8 *wpsie;
- uint wpsie_len = 0;
-
- /* The mac address is matched. */
- wpsie = rtw_get_wps_ie(&pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &wpsie_len);
- if (wpsie) {
- rtw_get_wps_attr_content(wpsie, wpsie_len, WPS_ATTR_DEVICE_NAME, dev_name, &dev_len);
- if (dev_len) {
- sprintf(dev_name_str, "\n\nN =%s", dev_name);
- blnMatch = 1;
- }
- }
- break;
- }
-
- plist = plist->next;
- }
-
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
-
- if (!blnMatch)
- sprintf(dev_name_str, "\n\nN = 0000");
-
- if (copy_to_user(wrqu->data.pointer, dev_name_str, 5 + ((dev_len > 17) ? dev_len : 17)))
- return -EFAULT;
- return 0;
-}
-
-static int rtw_p2p_get_invitation_procedure(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- u8 peerMAC[ETH_ALEN] = {0x00};
- int jj, kk;
- u8 peerMACStr[17] = {0x00};
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct list_head *plist, *phead;
- struct __queue *queue = &pmlmepriv->scanned_queue;
- struct wlan_network *pnetwork = NULL;
- u8 blnMatch = 0;
- u8 *p2pie;
- uint p2pielen = 0, attr_contentlen = 0;
- u8 attr_content[2] = {0x00};
-
- u8 inv_proc_str[17 + 8] = {0x00};
- /* +8 is for the str "InvProc =", we have to clear it at wrqu->data.pointer */
-
- /* Commented by Ouden 20121226 */
- /* The application wants to know whether the peer supports the P2P invitation procedure. */
- /* Format: iwpriv wlanx p2p_get2 InvProc = 00:E0:4C:00:00:05 */
-
- if (copy_from_user(peerMACStr, wrqu->data.pointer + 8, 17))
- return -EFAULT;
-
- for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
- peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
-
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
-
- phead = get_list_head(queue);
- plist = phead->next;
-
- while (phead != plist) {
- pnetwork = container_of(plist, struct wlan_network, list);
- if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
- /* Commented by Albert 20121226 */
- /* Match the device address located in the P2P IE */
- /* This is for the case that the P2P device address is not the same as the P2P interface address. */
-
- p2pie = rtw_get_p2p_ie(&pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &p2pielen);
- if (p2pie) {
- while (p2pie) {
- if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_CAPABILITY, attr_content, &attr_contentlen)) {
- /* Handle the P2P capability attribute */
- blnMatch = 1;
- break;
- }
-
- /* Get the next P2P IE */
- p2pie = rtw_get_p2p_ie(p2pie + p2pielen, pnetwork->network.IELength - 12 - (p2pie - &pnetwork->network.IEs[12] + p2pielen), NULL, &p2pielen);
- }
- }
- }
- plist = plist->next;
- }
-
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
-
- if (!blnMatch) {
- sprintf(inv_proc_str, "\nIP =-1");
- } else {
- if (attr_content[0] & 0x20)
- sprintf(inv_proc_str, "\nIP = 1");
- else
- sprintf(inv_proc_str, "\nIP = 0");
- }
- if (copy_to_user(wrqu->data.pointer, inv_proc_str, 8 + 17))
- return -EFAULT;
- return 0;
-}
-
-static int rtw_p2p_connect(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- u8 peerMAC[ETH_ALEN] = {0x00};
- int jj, kk;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct list_head *plist, *phead;
- struct __queue *queue = &pmlmepriv->scanned_queue;
- struct wlan_network *pnetwork = NULL;
- u32 peer_channel = 0;
-
- /* Commented by Albert 20110304 */
- /* The input data contains two pieces of information. */
- /* 1. The first is the MAC address of the peer to form a group with. */
- /* 2. The second is the WPS PIN code or the "pbc" string for the push-button method. */
- /* Format: 00:E0:4C:00:00:05 */
- /* Format: 00:E0:4C:00:00:05 */
-
- if (pwdinfo->p2p_state == P2P_STATE_NONE)
- return ret;
-
- if (pwdinfo->ui_got_wps_info == P2P_NO_WPSINFO)
- return -1;
-
- for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
- peerMAC[jj] = key_2char2num(extra[kk], extra[kk + 1]);
-
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
-
- phead = get_list_head(queue);
- plist = phead->next;
-
- while (phead != plist) {
- pnetwork = container_of(plist, struct wlan_network, list);
- if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
- peer_channel = pnetwork->network.Configuration.DSConfig;
- break;
- }
-
- plist = plist->next;
- }
-
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
-
- if (peer_channel) {
- memset(&pwdinfo->nego_req_info, 0x00, sizeof(struct tx_nego_req_info));
- memset(&pwdinfo->groupid_info, 0x00, sizeof(struct group_id_info));
-
- pwdinfo->nego_req_info.peer_channel_num[0] = peer_channel;
- memcpy(pwdinfo->nego_req_info.peerDevAddr, pnetwork->network.MacAddress, ETH_ALEN);
- pwdinfo->nego_req_info.benable = true;
-
- _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
- if (rtw_p2p_state(pwdinfo) != P2P_STATE_GONEGO_OK) {
- /* Restore to the listen state if the current p2p state is not nego OK */
- rtw_p2p_set_state(pwdinfo, P2P_STATE_LISTEN);
- }
-
- rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
- rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_ING);
-
- _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
- _set_timer(&pwdinfo->restore_p2p_state_timer, P2P_GO_NEGO_TIMEOUT);
- } else {
- ret = -1;
- }
- return ret;
-}
-
-static void rtw_p2p_invite_req(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- int jj, kk;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct list_head *plist, *phead;
- struct __queue *queue = &pmlmepriv->scanned_queue;
- struct wlan_network *pnetwork = NULL;
- uint peer_channel = 0;
- u8 attr_content[50] = {0x00};
- u8 *p2pie;
- uint p2pielen = 0, attr_contentlen = 0;
- struct tx_invite_req_info *pinvite_req_info = &pwdinfo->invitereq_info;
-
- /* The input data contains two information items. */
- /* 1. The first is the P2P device address to which the invitation will be sent. */
- /* 2. The second is the group id, made up of the GO's MAC address, a space, and the GO's SSID. */
- /* Command line sample: iwpriv wlan0 p2p_set invite ="00:11:22:33:44:55 00:E0:4C:00:00:05 DIRECT-xy" */
- /* Format: 00:11:22:33:44:55 00:E0:4C:00:00:05 DIRECT-xy */
-
- if (wrqu->data.length <= 37)
- return;
-
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
- return;
- } else {
- /* Reset the content of struct tx_invite_req_info */
- pinvite_req_info->benable = false;
- memset(pinvite_req_info->go_bssid, 0x00, ETH_ALEN);
- memset(pinvite_req_info->go_ssid, 0x00, WLAN_SSID_MAXLEN);
- pinvite_req_info->ssidlen = 0x00;
- pinvite_req_info->operating_ch = pwdinfo->operating_channel;
- memset(pinvite_req_info->peer_macaddr, 0x00, ETH_ALEN);
- pinvite_req_info->token = 3;
- }
-
- for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
- pinvite_req_info->peer_macaddr[jj] = key_2char2num(extra[kk], extra[kk + 1]);
-
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
-
- phead = get_list_head(queue);
- plist = phead->next;
-
- while (phead != plist) {
- pnetwork = container_of(plist, struct wlan_network, list);
-
- /* Commented by Albert 2011/05/18 */
- /* Match the device address located in the P2P IE */
- /* This is for the case that the P2P device address is not the same as the P2P interface address. */
-
- p2pie = rtw_get_p2p_ie(&pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &p2pielen);
- if (p2pie) {
- /* The P2P Device ID attribute is included in the Beacon frame. */
- /* The P2P Device Info attribute is included in the probe response frame. */
-
- if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_ID, attr_content, &attr_contentlen)) {
- /* Handle the P2P Device ID attribute of Beacon first */
- if (!memcmp(attr_content, pinvite_req_info->peer_macaddr, ETH_ALEN)) {
- peer_channel = pnetwork->network.Configuration.DSConfig;
- break;
- }
- } else if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_INFO, attr_content, &attr_contentlen)) {
- /* Handle the P2P Device Info attribute of probe response */
- if (!memcmp(attr_content, pinvite_req_info->peer_macaddr, ETH_ALEN)) {
- peer_channel = pnetwork->network.Configuration.DSConfig;
- break;
- }
- }
- }
- plist = plist->next;
- }
-
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
-
- if (peer_channel) {
- /* Store the GO's bssid */
- for (jj = 0, kk = 18; jj < ETH_ALEN; jj++, kk += 3)
- pinvite_req_info->go_bssid[jj] = key_2char2num(extra[kk], extra[kk + 1]);
-
- /* Store the GO's ssid */
- pinvite_req_info->ssidlen = wrqu->data.length - 36;
- memcpy(pinvite_req_info->go_ssid, &extra[36], (u32)pinvite_req_info->ssidlen);
- pinvite_req_info->benable = true;
- pinvite_req_info->peer_ch = peer_channel;
-
- rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
- rtw_p2p_set_state(pwdinfo, P2P_STATE_TX_INVITE_REQ);
-
- set_channel_bwmode(padapter, peer_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
-
- _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
-
- _set_timer(&pwdinfo->restore_p2p_state_timer, P2P_INVITE_TIMEOUT);
- }
-}
-
-static void rtw_p2p_set_persistent(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- /* The input data is 0 or 1 */
- /* 0: disable persistent group functionality */
- /* 1: enable persistent group functionality */
-
- if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
- return;
- } else {
- if (extra[0] == '0') /* Disable the persistent group function. */
- pwdinfo->persistent_supported = false;
- else if (extra[0] == '1') /* Enable the persistent group function. */
- pwdinfo->persistent_supported = true;
- else
- pwdinfo->persistent_supported = false;
- }
- pr_info("[%s] persistent_supported = %d\n", __func__, pwdinfo->persistent_supported);
-}
-
-static void rtw_p2p_prov_disc(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- u8 peerMAC[ETH_ALEN] = {0x00};
- int jj, kk;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct list_head *plist, *phead;
- struct __queue *queue = &pmlmepriv->scanned_queue;
- struct wlan_network *pnetwork = NULL;
- uint peer_channel = 0;
- u8 attr_content[100] = {0x00};
- u8 *p2pie;
- uint p2pielen = 0, attr_contentlen = 0;
-
- /* The input data contains two information items. */
- /* 1. The first is the MAC address of the peer to which the provision discovery request frame is issued. */
- /* 2. The second is the WPS configuration method to request in the provision discovery. */
- /* Format: 00:E0:4C:00:00:05_display */
- /* Format: 00:E0:4C:00:00:05_keypad */
- /* Format: 00:E0:4C:00:00:05_pbc */
- /* Format: 00:E0:4C:00:00:05_label */
-
- if (pwdinfo->p2p_state == P2P_STATE_NONE) {
- return;
- } else {
- /* Reset the content of struct tx_provdisc_req_info excluded the wps_config_method_request. */
- memset(pwdinfo->tx_prov_disc_info.peerDevAddr, 0x00, ETH_ALEN);
- memset(pwdinfo->tx_prov_disc_info.peerIFAddr, 0x00, ETH_ALEN);
- memset(&pwdinfo->tx_prov_disc_info.ssid, 0x00, sizeof(struct ndis_802_11_ssid));
- pwdinfo->tx_prov_disc_info.peer_channel_num[0] = 0;
- pwdinfo->tx_prov_disc_info.peer_channel_num[1] = 0;
- pwdinfo->tx_prov_disc_info.benable = false;
- }
-
- for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
- peerMAC[jj] = key_2char2num(extra[kk], extra[kk + 1]);
-
- if (!memcmp(&extra[18], "display", 7))
- pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_DISPLYA;
- else if (!memcmp(&extra[18], "keypad", 7))
- pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_KEYPAD;
- else if (!memcmp(&extra[18], "pbc", 3))
- pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_PUSH_BUTTON;
- else if (!memcmp(&extra[18], "label", 5))
- pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_LABEL;
- else
- return;
-
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
-
- phead = get_list_head(queue);
- plist = phead->next;
-
- while (phead != plist) {
- if (peer_channel != 0)
- break;
-
- pnetwork = container_of(plist, struct wlan_network, list);
-
- /* Commented by Albert 2011/05/18 */
- /* Match the device address located in the P2P IE */
- /* This is for the case that the P2P device address is not the same as the P2P interface address. */
-
- p2pie = rtw_get_p2p_ie(&pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &p2pielen);
- if (p2pie) {
- while (p2pie) {
- /* The P2P Device ID attribute is included in the Beacon frame. */
- /* The P2P Device Info attribute is included in the probe response frame. */
-
- if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_ID, attr_content, &attr_contentlen)) {
- /* Handle the P2P Device ID attribute of Beacon first */
- if (!memcmp(attr_content, peerMAC, ETH_ALEN)) {
- peer_channel = pnetwork->network.Configuration.DSConfig;
- break;
- }
- } else if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_INFO, attr_content, &attr_contentlen)) {
- /* Handle the P2P Device Info attribute of probe response */
- if (!memcmp(attr_content, peerMAC, ETH_ALEN)) {
- peer_channel = pnetwork->network.Configuration.DSConfig;
- break;
- }
- }
-
- /* Get the next P2P IE */
- p2pie = rtw_get_p2p_ie(p2pie + p2pielen, pnetwork->network.IELength - 12 - (p2pie - &pnetwork->network.IEs[12] + p2pielen), NULL, &p2pielen);
- }
- }
-
- plist = plist->next;
- }
-
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
-
- if (peer_channel) {
- memcpy(pwdinfo->tx_prov_disc_info.peerIFAddr, pnetwork->network.MacAddress, ETH_ALEN);
- memcpy(pwdinfo->tx_prov_disc_info.peerDevAddr, peerMAC, ETH_ALEN);
- pwdinfo->tx_prov_disc_info.peer_channel_num[0] = (u16)peer_channel;
- pwdinfo->tx_prov_disc_info.benable = true;
- rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
- rtw_p2p_set_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ);
-
- if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT)) {
- memcpy(&pwdinfo->tx_prov_disc_info.ssid, &pnetwork->network.Ssid, sizeof(struct ndis_802_11_ssid));
- } else if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE) || rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
- memcpy(pwdinfo->tx_prov_disc_info.ssid.Ssid, pwdinfo->p2p_wildcard_ssid, P2P_WILDCARD_SSID_LEN);
- pwdinfo->tx_prov_disc_info.ssid.SsidLength = P2P_WILDCARD_SSID_LEN;
- }
-
- set_channel_bwmode(padapter, peer_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
-
- _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
-
- _set_timer(&pwdinfo->restore_p2p_state_timer, P2P_PROVISION_TIMEOUT);
- }
-}
-
-/* This function is used to inform the driver that the user has specified the PIN code value or PBC */
-/* to the application. */
-
-static void rtw_p2p_got_wpsinfo(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
- /* Added by Albert 20110328 */
- /* if the input data is P2P_NO_WPSINFO -> reset the wpsinfo */
- /* if the input data is P2P_GOT_WPSINFO_PEER_DISPLAY_PIN -> the utility just input the PIN code got from the peer P2P device. */
- /* if the input data is P2P_GOT_WPSINFO_SELF_DISPLAY_PIN -> the utility just got the PIN code from itself. */
- /* if the input data is P2P_GOT_WPSINFO_PBC -> the utility just determine to use the PBC */
-
- if (*extra == '0')
- pwdinfo->ui_got_wps_info = P2P_NO_WPSINFO;
- else if (*extra == '1')
- pwdinfo->ui_got_wps_info = P2P_GOT_WPSINFO_PEER_DISPLAY_PIN;
- else if (*extra == '2')
- pwdinfo->ui_got_wps_info = P2P_GOT_WPSINFO_SELF_DISPLAY_PIN;
- else if (*extra == '3')
- pwdinfo->ui_got_wps_info = P2P_GOT_WPSINFO_PBC;
- else
- pwdinfo->ui_got_wps_info = P2P_NO_WPSINFO;
-}
-
-static int rtw_p2p_set(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- if (!memcmp(extra, "enable =", 7)) {
- rtw_wext_p2p_enable(dev, info, wrqu, &extra[7]);
- } else if (!memcmp(extra, "setDN =", 6)) {
- wrqu->data.length -= 6;
- rtw_p2p_setDN(dev, info, wrqu, &extra[6]);
- } else if (!memcmp(extra, "profilefound =", 13)) {
- wrqu->data.length -= 13;
- rtw_p2p_profilefound(dev, info, wrqu, &extra[13]);
- } else if (!memcmp(extra, "prov_disc =", 10)) {
- wrqu->data.length -= 10;
- rtw_p2p_prov_disc(dev, info, wrqu, &extra[10]);
- } else if (!memcmp(extra, "nego =", 5)) {
- wrqu->data.length -= 5;
- rtw_p2p_connect(dev, info, wrqu, &extra[5]);
- } else if (!memcmp(extra, "intent =", 7)) {
- /* Commented by Albert 2011/03/23 */
- /* The wrqu->data.length will include the null character */
- /* So, we will decrease 7 + 1 */
- wrqu->data.length -= 8;
- rtw_p2p_set_intent(dev, info, wrqu, &extra[7]);
- } else if (!memcmp(extra, "ssid =", 5)) {
- wrqu->data.length -= 5;
- rtw_p2p_set_go_nego_ssid(dev, info, wrqu, &extra[5]);
- } else if (!memcmp(extra, "got_wpsinfo =", 12)) {
- wrqu->data.length -= 12;
- rtw_p2p_got_wpsinfo(dev, info, wrqu, &extra[12]);
- } else if (!memcmp(extra, "listen_ch =", 10)) {
- /* Commented by Albert 2011/05/24 */
- /* The wrqu->data.length will include the null character */
- /* So, we will decrease (10 + 1) */
- wrqu->data.length -= 11;
- rtw_p2p_set_listen_ch(dev, info, wrqu, &extra[10]);
- } else if (!memcmp(extra, "op_ch =", 6)) {
- /* Commented by Albert 2011/05/24 */
- /* The wrqu->data.length will include the null character */
- /* So, we will decrease (6 + 1) */
- wrqu->data.length -= 7;
- rtw_p2p_set_op_ch(dev, info, wrqu, &extra[6]);
- } else if (!memcmp(extra, "invite =", 7)) {
- wrqu->data.length -= 8;
- rtw_p2p_invite_req(dev, info, wrqu, &extra[7]);
- } else if (!memcmp(extra, "persistent =", 11)) {
- wrqu->data.length -= 11;
- rtw_p2p_set_persistent(dev, info, wrqu, &extra[11]);
- }
-
- return 0;
-}
-
-static int rtw_p2p_get2(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
-
- if (!memcmp(extra, "wpsCM =", 6)) {
- wrqu->data.length -= 6;
- ret = rtw_p2p_get_wps_configmethod(dev, info, wrqu, &extra[6]);
- } else if (!memcmp(extra, "devN =", 5)) {
- wrqu->data.length -= 5;
- ret = rtw_p2p_get_device_name(dev, info, wrqu, &extra[5]);
- } else if (!memcmp(extra, "dev_type =", 9)) {
- wrqu->data.length -= 9;
- ret = rtw_p2p_get_device_type(dev, info, wrqu, &extra[9]);
- } else if (!memcmp(extra, "go_devadd =", 10)) {
- wrqu->data.length -= 10;
- ret = rtw_p2p_get_go_device_address(dev, info, wrqu, &extra[10]);
- } else if (!memcmp(extra, "InvProc =", 8)) {
- wrqu->data.length -= 8;
- ret = rtw_p2p_get_invitation_procedure(dev, info, wrqu, &extra[8]);
- }
-
- return ret;
-}
-
-static int rtw_rereg_nd_name(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct rereg_nd_name_data *rereg_priv = &padapter->rereg_nd_name_priv;
- char new_ifname[IFNAMSIZ];
-
- if (rereg_priv->old_ifname[0] == 0) {
- char *reg_ifname;
- reg_ifname = padapter->registrypriv.if2name;
-
- strscpy(rereg_priv->old_ifname, reg_ifname, IFNAMSIZ);
- }
-
- if (wrqu->data.length > IFNAMSIZ)
- return -EFAULT;
-
- if (copy_from_user(new_ifname, wrqu->data.pointer, IFNAMSIZ))
- return -EFAULT;
-
- if (0 == strcmp(rereg_priv->old_ifname, new_ifname))
- return ret;
-
- ret = rtw_change_ifname(padapter, new_ifname);
- if (0 != ret)
- goto exit;
-
- if (!memcmp(rereg_priv->old_ifname, "disable%d", 9)) {
- padapter->ledpriv.bRegUseLed = rereg_priv->old_bRegUseLed;
- rtl8188eu_InitSwLeds(padapter);
- rtw_ips_mode_req(&padapter->pwrctrlpriv, rereg_priv->old_ips_mode);
- }
-
- strscpy(rereg_priv->old_ifname, new_ifname, IFNAMSIZ);
-
- if (!memcmp(new_ifname, "disable%d", 9)) {
- /* free network queue for Android's timing issue */
- rtw_free_network_queue(padapter, true);
-
- /* close led */
- rtw_led_control(padapter, LED_CTL_POWER_OFF);
- rereg_priv->old_bRegUseLed = padapter->ledpriv.bRegUseLed;
- padapter->ledpriv.bRegUseLed = false;
- rtl8188eu_DeInitSwLeds(padapter);
-
- /* the interface is being "disabled", we can do deeper IPS */
- rereg_priv->old_ips_mode = rtw_get_ips_mode_req(&padapter->pwrctrlpriv);
- rtw_ips_mode_req(&padapter->pwrctrlpriv, IPS_NORMAL);
- }
-exit:
- return ret;
-}
-
-static void mac_reg_dump(struct adapter *padapter)
-{
- int i, j = 1;
- u32 reg;
- int res;
-
- pr_info("\n ======= MAC REG =======\n");
- for (i = 0x0; i < 0x300; i += 4) {
- if (j % 4 == 1)
- pr_info("0x%02x", i);
-
- res = rtw_read32(padapter, i, &reg);
- if (!res)
- pr_info(" 0x%08x ", reg);
-
- if ((j++) % 4 == 0)
- pr_info("\n");
- }
- for (i = 0x400; i < 0x800; i += 4) {
- if (j % 4 == 1)
- pr_info("0x%02x", i);
-
- res = rtw_read32(padapter, i, &reg);
- if (!res)
- pr_info(" 0x%08x ", reg);
-
- if ((j++) % 4 == 0)
- pr_info("\n");
- }
-}
-
-static void bb_reg_dump(struct adapter *padapter)
-{
- int i, j = 1, res;
- u32 reg;
-
- pr_info("\n ======= BB REG =======\n");
- for (i = 0x800; i < 0x1000; i += 4) {
- if (j % 4 == 1)
- pr_info("0x%02x", i);
-
- res = rtw_read32(padapter, i, &reg);
- if (!res)
- pr_info(" 0x%08x ", reg);
-
- if ((j++) % 4 == 0)
- pr_info("\n");
- }
-}
-
-static void rf_reg_dump(struct adapter *padapter)
-{
- int i, j = 1;
- u32 value;
-
- pr_info("\n ======= RF REG =======\n");
- pr_info("\nRF_Path(%x)\n", RF_PATH_A);
- for (i = 0; i < 0x100; i++) {
- value = rtl8188e_PHY_QueryRFReg(padapter, i, 0xffffffff);
- if (j % 4 == 1)
- pr_info("0x%02x ", i);
- pr_info(" 0x%08x ", value);
- if ((j++) % 4 == 0)
- pr_info("\n");
- }
-}
-
-static void rtw_set_dynamic_functions(struct adapter *adapter, u8 dm_func)
-{
- struct hal_data_8188e *haldata = &adapter->haldata;
- struct odm_dm_struct *odmpriv = &haldata->odmpriv;
- int res;
-
- switch (dm_func) {
- case 0:
- /* disable all dynamic func */
- odmpriv->SupportAbility = DYNAMIC_FUNC_DISABLE;
- break;
- case 1:
- /* disable DIG */
- odmpriv->SupportAbility &= (~DYNAMIC_BB_DIG);
- break;
- case 6:
- /* turn on all dynamic func */
- if (!(odmpriv->SupportAbility & DYNAMIC_BB_DIG)) {
- struct rtw_dig *digtable = &odmpriv->DM_DigTable;
-
- res = rtw_read8(adapter, 0xc50, &digtable->CurIGValue);
- (void)res;
- /* FIXME: return an error to caller */
- }
- odmpriv->SupportAbility = DYNAMIC_ALL_FUNC_ENABLE;
- break;
- default:
- break;
- }
-}
-
-static void rtw_set_dm_func_flag(struct adapter *adapter, u32 odm_flag)
-{
- struct hal_data_8188e *haldata = &adapter->haldata;
- struct odm_dm_struct *odmpriv = &haldata->odmpriv;
-
- odmpriv->SupportAbility = odm_flag;
-}
-
-static int rtw_dbg_port(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- u8 major_cmd, minor_cmd;
- u16 arg;
- s32 extra_arg;
- u32 *pdata, val32;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_network *cur_network = &pmlmepriv->cur_network;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- pdata = (u32 *)&wrqu->data;
-
- val32 = *pdata;
- arg = (u16)(val32 & 0x0000ffff);
- major_cmd = (u8)(val32 >> 24);
- minor_cmd = (u8)((val32 >> 16) & 0x00ff);
-
- extra_arg = *(pdata + 1);
-
- switch (major_cmd) {
- case 0x70:/* read_reg */
- switch (minor_cmd) {
- case 1:
- break;
- case 2:
- break;
- case 4:
- break;
- }
- break;
- case 0x71:/* write_reg */
- switch (minor_cmd) {
- case 1:
- rtw_write8(padapter, arg, extra_arg);
- break;
- case 2:
- rtw_write16(padapter, arg, extra_arg);
- break;
- case 4:
- rtw_write32(padapter, arg, extra_arg);
- break;
- }
- break;
- case 0x72:/* read_bb */
- break;
- case 0x73:/* write_bb */
- rtl8188e_PHY_SetBBReg(padapter, arg, 0xffffffff, extra_arg);
- break;
- case 0x74:/* read_rf */
- if (minor_cmd != RF_PATH_A) {
- ret = -EINVAL;
- break;
- }
- break;
- case 0x75:/* write_rf */
- if (minor_cmd != RF_PATH_A) {
- ret = -EINVAL;
- break;
- }
- rtl8188e_PHY_SetRFReg(padapter, arg, 0xffffffff, extra_arg);
- break;
-
- case 0x76:
- switch (minor_cmd) {
- case 0x00: /* normal mode, */
- padapter->recvpriv.is_signal_dbg = 0;
- break;
- case 0x01: /* dbg mode */
- padapter->recvpriv.is_signal_dbg = 1;
- extra_arg = extra_arg > 100 ? 100 : extra_arg;
- extra_arg = extra_arg < 0 ? 0 : extra_arg;
- padapter->recvpriv.signal_strength_dbg = extra_arg;
- break;
- }
- break;
- case 0x78: /* IOL test */
- switch (minor_cmd) {
- case 0x04: /* LLT table initialization test */
- {
- struct xmit_frame *xmit_frame;
-
- xmit_frame = rtw_IOL_accquire_xmit_frame(padapter);
- if (!xmit_frame) {
- ret = -ENOMEM;
- break;
- }
-
- if (rtl8188e_IOL_exec_cmds_sync(padapter, xmit_frame, 500, 0) != _SUCCESS)
- ret = -EPERM;
- }
- break;
- case 0x05: /* blink LED test */
- {
- u16 reg = 0x4c;
- u32 blink_num = 50;
- u32 blink_delay_ms = 200;
- int i;
- struct xmit_frame *xmit_frame;
-
- xmit_frame = rtw_IOL_accquire_xmit_frame(padapter);
- if (!xmit_frame) {
- ret = -ENOMEM;
- break;
- }
-
- for (i = 0; i < blink_num; i++) {
- rtw_IOL_append_WB_cmd(xmit_frame, reg, 0x00, 0xff);
- rtw_IOL_append_DELAY_MS_cmd(xmit_frame, blink_delay_ms);
- rtw_IOL_append_WB_cmd(xmit_frame, reg, 0x08, 0xff);
- rtw_IOL_append_DELAY_MS_cmd(xmit_frame, blink_delay_ms);
- }
- if (rtl8188e_IOL_exec_cmds_sync(padapter, xmit_frame, (blink_delay_ms * blink_num * 2) + 200, 0) != _SUCCESS)
- ret = -EPERM;
- }
- break;
-
- case 0x06: /* continuous write byte test */
- {
- u16 reg = arg;
- u16 start_value = 0;
- u32 write_num = extra_arg;
- int i, res;
- struct xmit_frame *xmit_frame;
- u8 val8;
-
- xmit_frame = rtw_IOL_accquire_xmit_frame(padapter);
- if (!xmit_frame) {
- ret = -ENOMEM;
- break;
- }
-
- for (i = 0; i < write_num; i++)
- rtw_IOL_append_WB_cmd(xmit_frame, reg, i + start_value, 0xFF);
- if (rtl8188e_IOL_exec_cmds_sync(padapter, xmit_frame, 5000, 0) != _SUCCESS)
- ret = -EPERM;
-
- /* FIXME: is this read necessary? */
- res = rtw_read8(padapter, reg, &val8);
- (void)res;
- }
- break;
-
- case 0x07: /* continuous write word test */
- {
- u16 reg = arg;
- u16 start_value = 200;
- u32 write_num = extra_arg;
- u16 val16;
- int i, res;
- struct xmit_frame *xmit_frame;
-
- xmit_frame = rtw_IOL_accquire_xmit_frame(padapter);
- if (!xmit_frame) {
- ret = -ENOMEM;
- break;
- }
-
- for (i = 0; i < write_num; i++)
- rtw_IOL_append_WW_cmd(xmit_frame, reg, i + start_value, 0xFFFF);
- if (rtl8188e_IOL_exec_cmds_sync(padapter, xmit_frame, 5000, 0) != _SUCCESS)
- ret = -EPERM;
-
- /* FIXME: is this read necessary? */
- res = rtw_read16(padapter, reg, &val16);
- (void)res;
- }
- break;
- case 0x08: /* continuous write dword test */
- {
- u16 reg = arg;
- u32 start_value = 0x110000c7;
- u32 write_num = extra_arg;
-
- int i;
- struct xmit_frame *xmit_frame;
-
- xmit_frame = rtw_IOL_accquire_xmit_frame(padapter);
- if (!xmit_frame) {
- ret = -ENOMEM;
- break;
- }
-
- for (i = 0; i < write_num; i++)
- rtw_IOL_append_WD_cmd(xmit_frame, reg, i + start_value, 0xFFFFFFFF);
- if (rtl8188e_IOL_exec_cmds_sync(padapter, xmit_frame, 5000, 0) != _SUCCESS)
- ret = -EPERM;
-
- /* FIXME: is this read necessary? */
- ret = rtw_read32(padapter, reg, &write_num);
- }
- break;
- }
- break;
- case 0x79:
- {
- /*
- * dbg 0x79000000 [value], set RESP_TXAGC to + value, value:0~15
- * dbg 0x79010000 [value], set RESP_TXAGC to - value, value:0~15
- */
- u8 value = extra_arg & 0x0f;
- u8 sign = minor_cmd;
- u16 write_value = 0;
-
- if (sign)
- value = value | 0x10;
-
- write_value = value | (value << 5);
- rtw_write16(padapter, 0x6d9, write_value);
- }
- break;
- case 0x7a:
- receive_disconnect(padapter, pmlmeinfo->network.MacAddress
- , WLAN_REASON_EXPIRATION_CHK);
- break;
- case 0x7F:
- switch (minor_cmd) {
- case 0x0:
- break;
- case 0x01:
- break;
- case 0x02:
- break;
- case 0x03:
- break;
- case 0x04:
- break;
- case 0x05:
- rtw_get_stainfo(pstapriv, cur_network->network.MacAddress);
- break;
- case 0x06:
- {
- u32 ODMFlag = (u32)(0x0f & arg);
- rtw_set_dm_func_flag(padapter, ODMFlag);
- }
- break;
- case 0x07:
- break;
- case 0x08:
- break;
- case 0x09:
- break;
- case 0x15:
- break;
- case 0x10:/* driver version display */
- break;
- case 0x11:
- padapter->bRxRSSIDisplay = extra_arg;
- break;
- case 0x12: /* set rx_stbc */
- {
- struct registry_priv *pregpriv = &padapter->registrypriv;
- /* 0: disable, bit(0):enable 2.4g, bit(1):enable 5g, 0x3: enable both 2.4g and 5g */
- /* default is set to enable 2.4 GHz for an IOT issue with Buffalo's AP at 5 GHz */
- if (extra_arg == 0 ||
- extra_arg == 1 ||
- extra_arg == 2 ||
- extra_arg == 3)
- pregpriv->rx_stbc = extra_arg;
- }
- break;
- case 0x13: /* set ampdu_enable */
- {
- struct registry_priv *pregpriv = &padapter->registrypriv;
- /* 0: disable, 0x1:enable (but wifi_spec should be 0), 0x2: force enable (don't care wifi_spec) */
- if (extra_arg >= 0 && extra_arg < 3)
- pregpriv->ampdu_enable = extra_arg;
- }
- break;
- case 0x14: /* get wifi_spec */
- break;
- case 0x23:
- padapter->bNotifyChannelChange = extra_arg;
- break;
- case 0x24:
- padapter->bShowGetP2PState = extra_arg;
- break;
- case 0xdd:/* registers dump, 0 for mac reg, 1 for bb reg, 2 for rf reg */
- if (extra_arg == 0)
- mac_reg_dump(padapter);
- else if (extra_arg == 1)
- bb_reg_dump(padapter);
- else if (extra_arg == 2)
- rf_reg_dump(padapter);
- break;
- case 0xee:/* turn on/off dynamic funcs */
- if (extra_arg != 0xf) {
- /* extra_arg = 0 - disable all dynamic func
- * extra_arg = 1 - disable DIG
- * extra_arg = 6 - turn on all dynamic func
- */
- rtw_set_dynamic_functions(padapter, extra_arg);
- }
- break;
- case 0xfd:
- rtw_write8(padapter, 0xc50, arg);
- rtw_write8(padapter, 0xc58, arg);
- break;
- case 0xfe:
- break;
- case 0xff:
- break;
- }
- break;
- default:
- break;
- }
- return ret;
-}
-
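rtw_dbg_port() above packs its debug command into a single 32-bit word: bits 31..24 carry the major command, bits 23..16 the minor command, bits 15..0 the argument, and a second 32-bit word carries extra_arg. A self-contained sketch of that decoding, using the 0x79010000 value from the RESP_TXAGC comment as input:

    #include <stdio.h>

    int main(void)
    {
            unsigned int val32 = 0x79010000;            /* "dbg 0x79010000 [value]" */
            unsigned char major_cmd = (val32 >> 24) & 0xff;
            unsigned char minor_cmd = (val32 >> 16) & 0xff;
            unsigned short arg = val32 & 0xffff;

            /* prints: major=0x79 minor=0x01 arg=0x0000 */
            printf("major=0x%02x minor=0x%02x arg=0x%04x\n", major_cmd, minor_cmd, arg);
            return 0;
    }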
-static int rtw_wx_set_priv(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *awrq,
- char *extra)
-{
- int ret = 0;
- int len = 0;
- char *ext;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct iw_point *dwrq = (struct iw_point *)awrq;
-
- if (dwrq->length == 0)
- return -EFAULT;
-
- len = dwrq->length;
- ext = vmalloc(len);
- if (!ext)
- return -ENOMEM;
-
- if (copy_from_user(ext, dwrq->pointer, len)) {
- vfree(ext);
- return -EFAULT;
- }
-
- /* added for wps2.0 @20110524 */
- if (dwrq->flags == 0x8766 && len > 8) {
- u32 cp_sz;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- u8 *probereq_wpsie = ext;
- int probereq_wpsie_len = len;
- u8 wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
-
- if ((_VENDOR_SPECIFIC_IE_ == probereq_wpsie[0]) &&
- (!memcmp(&probereq_wpsie[2], wps_oui, 4))) {
- cp_sz = min(probereq_wpsie_len, MAX_WPS_IE_LEN);
-
- pmlmepriv->wps_probe_req_ie_len = 0;
- kfree(pmlmepriv->wps_probe_req_ie);
- pmlmepriv->wps_probe_req_ie = NULL;
-
- pmlmepriv->wps_probe_req_ie = kmemdup(probereq_wpsie, cp_sz, GFP_KERNEL);
- if (!pmlmepriv->wps_probe_req_ie) {
- ret = -EINVAL;
- goto FREE_EXT;
- }
- pmlmepriv->wps_probe_req_ie_len = cp_sz;
- }
- goto FREE_EXT;
- }
-
- if (len >= WEXT_CSCAN_HEADER_SIZE &&
- !memcmp(ext, WEXT_CSCAN_HEADER, WEXT_CSCAN_HEADER_SIZE)) {
- ret = rtw_wx_set_scan(dev, info, awrq, ext);
- goto FREE_EXT;
- }
-
-FREE_EXT:
-
- vfree(ext);
-
- return ret;
-}
-
-static int rtw_pm_set(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- unsigned mode = 0;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
- if (!memcmp(extra, "lps =", 4)) {
- sscanf(extra + 4, "%u", &mode);
- ret = rtw_pm_set_lps(padapter, mode);
- } else if (!memcmp(extra, "ips =", 4)) {
- sscanf(extra + 4, "%u", &mode);
- ret = rtw_pm_set_ips(padapter, mode);
- } else {
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static iw_handler rtw_handlers[] = {
- IW_HANDLER(SIOCGIWNAME, rtw_wx_get_name),
- IW_HANDLER(SIOCGIWFREQ, rtw_wx_get_freq),
- IW_HANDLER(SIOCSIWMODE, rtw_wx_set_mode),
- IW_HANDLER(SIOCGIWMODE, rtw_wx_get_mode),
- IW_HANDLER(SIOCGIWSENS, rtw_wx_get_sens),
- IW_HANDLER(SIOCGIWRANGE, rtw_wx_get_range),
- IW_HANDLER(SIOCSIWPRIV, rtw_wx_set_priv),
- IW_HANDLER(SIOCSIWAP, rtw_wx_set_wap),
- IW_HANDLER(SIOCGIWAP, rtw_wx_get_wap),
- IW_HANDLER(SIOCSIWMLME, rtw_wx_set_mlme),
- IW_HANDLER(SIOCSIWSCAN, rtw_wx_set_scan),
- IW_HANDLER(SIOCGIWSCAN, rtw_wx_get_scan),
- IW_HANDLER(SIOCSIWESSID, rtw_wx_set_essid),
- IW_HANDLER(SIOCGIWESSID, rtw_wx_get_essid),
- IW_HANDLER(SIOCGIWNICKN, rtw_wx_get_nick),
- IW_HANDLER(SIOCSIWRATE, rtw_wx_set_rate),
- IW_HANDLER(SIOCGIWRATE, rtw_wx_get_rate),
- IW_HANDLER(SIOCSIWRTS, rtw_wx_set_rts),
- IW_HANDLER(SIOCGIWRTS, rtw_wx_get_rts),
- IW_HANDLER(SIOCSIWFRAG, rtw_wx_set_frag),
- IW_HANDLER(SIOCGIWFRAG, rtw_wx_get_frag),
- IW_HANDLER(SIOCGIWRETRY, rtw_wx_get_retry),
- IW_HANDLER(SIOCSIWENCODE, rtw_wx_set_enc),
- IW_HANDLER(SIOCGIWENCODE, rtw_wx_get_enc),
- IW_HANDLER(SIOCGIWPOWER, rtw_wx_get_power),
- IW_HANDLER(SIOCSIWGENIE, rtw_wx_set_gen_ie),
- IW_HANDLER(SIOCSIWAUTH, rtw_wx_set_auth),
- IW_HANDLER(SIOCSIWENCODEEXT, rtw_wx_set_enc_ext),
- IW_HANDLER(SIOCSIWPMKSA, rtw_wx_set_pmkid),
-};
-
-static const struct iw_priv_args rtw_private_args[] = {
- {
- SIOCIWFIRSTPRIV + 0x0,
- IW_PRIV_TYPE_CHAR | 0x7FF, 0, "write"
- },
- {
- SIOCIWFIRSTPRIV + 0x1,
- IW_PRIV_TYPE_CHAR | 0x7FF,
- IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, "read"
- },
- {
- SIOCIWFIRSTPRIV + 0x2, 0, 0, "driver_ext"
- },
- {
- SIOCIWFIRSTPRIV + 0x4,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "apinfo"
- },
- {
- SIOCIWFIRSTPRIV + 0x5,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "setpid"
- },
- {
- SIOCIWFIRSTPRIV + 0x6,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wps_start"
- },
- {
- SIOCIWFIRSTPRIV + 0xA,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "channel_plan"
- },
-
- {
- SIOCIWFIRSTPRIV + 0xB,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "dbg"
- },
- {
- SIOCIWFIRSTPRIV + 0xC,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0, "rfw"
- },
- {
- SIOCIWFIRSTPRIV + 0xD,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, "rfr"
- },
- {
- SIOCIWFIRSTPRIV + 0x10,
- IW_PRIV_TYPE_CHAR | P2P_PRIVATE_IOCTL_SET_LEN, 0, "p2p_set"
- },
- {
- SIOCIWFIRSTPRIV + 0x11,
- IW_PRIV_TYPE_CHAR | P2P_PRIVATE_IOCTL_SET_LEN, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | P2P_PRIVATE_IOCTL_SET_LEN, "p2p_get"
- },
- {
- SIOCIWFIRSTPRIV + 0x12,
- IW_PRIV_TYPE_CHAR | P2P_PRIVATE_IOCTL_SET_LEN, IW_PRIV_TYPE_CHAR | IFNAMSIZ, "p2p_get2"
- },
- {
- SIOCIWFIRSTPRIV + 0x16,
- IW_PRIV_TYPE_CHAR | 64, 0, "pm_set"
- },
-
- {SIOCIWFIRSTPRIV + 0x18, IW_PRIV_TYPE_CHAR | IFNAMSIZ, 0, "rereg_nd_name"},
-};
-
-static iw_handler rtw_private_handler[] = {
- NULL, /* 0x00 */
- NULL, /* 0x01 */
- NULL, /* 0x02 */
- NULL, /* 0x03 */
-/* for MM DTV platform */
- rtw_get_ap_info, /* 0x04 */
-
- rtw_set_pid, /* 0x05 */
- rtw_wps_start, /* 0x06 */
-
- NULL, /* 0x07 */
- NULL, /* 0x08 */
- NULL, /* 0x09 */
-
-/* Set Channel depend on the country code */
- rtw_wx_set_channel_plan, /* 0x0A */
-
- rtw_dbg_port, /* 0x0B */
- rtw_wx_write_rf, /* 0x0C */
- rtw_wx_read_rf, /* 0x0D */
- NULL, /* 0x0E */
- NULL, /* 0x0F */
-
- rtw_p2p_set, /* 0x10 */
- NULL, /* 0x11 */
- rtw_p2p_get2, /* 0x12 */
-
- NULL, /* 0x13 */
- NULL, /* 0x14 */
- NULL, /* 0x15 */
-
- rtw_pm_set, /* 0x16 */
- NULL, /* 0x17 */
- rtw_rereg_nd_name, /* 0x18 */
-};
-
-static struct iw_statistics *rtw_get_wireless_stats(struct net_device *dev)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct iw_statistics *piwstats = &padapter->iwstats;
- int tmp_noise = 0;
- int tmp;
-
- if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
- piwstats->qual.qual = 0;
- piwstats->qual.level = 0;
- piwstats->qual.noise = 0;
- } else {
- tmp_noise = padapter->recvpriv.noise;
-
- piwstats->qual.level = padapter->signal_strength;
- tmp = 219 + 3 * padapter->signal_strength;
- tmp = min(100, tmp);
- tmp = max(0, tmp);
- piwstats->qual.qual = tmp;
- piwstats->qual.noise = tmp_noise;
- }
- piwstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
- return &padapter->iwstats;
-}
-
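The quality figure computed above is a linear function of the reported signal strength, clamped to the 0..100 range. If signal_strength is a dBm value, as the IW_QUAL_DBM flag set in the same function suggests, then qual = 219 + 3 * signal_strength, so -40 dBm yields 99 and anything at or below -73 dBm yields 0. A tiny worked sketch under that assumption (qual_from_dbm() is illustrative only, not a driver function):

    #include <stdio.h>

    static int qual_from_dbm(int signal_dbm)
    {
            int q = 219 + 3 * signal_dbm;

            if (q > 100)
                    q = 100;
            if (q < 0)
                    q = 0;
            return q;
    }

    int main(void)
    {
            printf("-40 dBm -> %d\n", qual_from_dbm(-40));   /* 99 */
            printf("-50 dBm -> %d\n", qual_from_dbm(-50));   /* 69 */
            printf("-73 dBm -> %d\n", qual_from_dbm(-73));   /* 0  */
            return 0;
    }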
-struct iw_handler_def rtw_handlers_def = {
- .standard = rtw_handlers,
- .num_standard = ARRAY_SIZE(rtw_handlers),
- .private = rtw_private_handler,
- .private_args = (struct iw_priv_args *)rtw_private_args,
- .num_private = ARRAY_SIZE(rtw_private_handler),
- .num_private_args = ARRAY_SIZE(rtw_private_args),
- .get_wireless_stats = rtw_get_wireless_stats,
-};
diff --git a/drivers/staging/r8188eu/os_dep/os_intfs.c b/drivers/staging/r8188eu/os_dep/os_intfs.c
deleted file mode 100644
index dc419fd1ffa5..000000000000
--- a/drivers/staging/r8188eu/os_dep/os_intfs.c
+++ /dev/null
@@ -1,807 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _OS_INTFS_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/hal_intf.h"
-#include "../include/rtw_ioctl.h"
-#include "../include/usb_osintf.h"
-#include "../include/rtw_br_ext.h"
-#include "../include/rtw_led.h"
-#include "../include/rtl8188e_dm.h"
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Realtek Wireless Lan Driver");
-MODULE_AUTHOR("Realtek Semiconductor Corp.");
-MODULE_FIRMWARE(FW_RTL8188EU);
-
-#define CONFIG_BR_EXT_BRNAME "br0"
-#define RTW_NOTCH_FILTER 0 /* 0:Disable, 1:Enable, */
-
-/* module param defaults */
-static int rtw_rfintfs = HWPI;
-static int rtw_lbkmode;/* RTL8712_AIR_TRX; */
-static int rtw_network_mode = Ndis802_11IBSS;/* Ndis802_11Infrastructure; infra, ad-hoc, auto */
-static int rtw_channel = 1;/* ad-hoc support requirement */
-static int rtw_wireless_mode = WIRELESS_11BG_24N;
-static int rtw_vrtl_carrier_sense = AUTO_VCS;
-static int rtw_vcs_type = RTS_CTS;/* */
-static int rtw_rts_thresh = 2347;/* */
-static int rtw_frag_thresh = 2346;/* */
-static int rtw_preamble = PREAMBLE_LONG;/* long, short, auto */
-static int rtw_scan_mode = 1;/* active, passive */
-static int rtw_adhoc_tx_pwr = 1;
-static int rtw_soft_ap;
-static int rtw_power_mgnt = 1;
-static int rtw_ips_mode = IPS_NORMAL;
-
-static int rtw_smart_ps = 2;
-
-module_param(rtw_ips_mode, int, 0644);
-MODULE_PARM_DESC(rtw_ips_mode, "The default IPS mode");
-
-static int rtw_radio_enable = 1;
-static int rtw_long_retry_lmt = 7;
-static int rtw_short_retry_lmt = 7;
-static int rtw_busy_thresh = 40;
-static int rtw_ack_policy = NORMAL_ACK;
-
-static int rtw_software_encrypt;
-static int rtw_software_decrypt;
-
-static int rtw_acm_method;/* 0:By SW 1:By HW. */
-
-static int rtw_wmm_enable = 1;/* default is set to enable the wmm. */
-static int rtw_uapsd_enable;
-static int rtw_uapsd_max_sp = NO_LIMIT;
-static int rtw_uapsd_acbk_en;
-static int rtw_uapsd_acbe_en;
-static int rtw_uapsd_acvi_en;
-static int rtw_uapsd_acvo_en;
-
-static int rtw_led_enable = 1;
-
-static int rtw_ht_enable = 1;
-static int rtw_cbw40_enable = 3; /* 0 :disable, bit(0): enable 2.4g, bit(1): enable 5g */
-static int rtw_ampdu_enable = 1;/* for enable tx_ampdu */
-static int rtw_rx_stbc = 1;/* 0: disable, bit(0):enable 2.4g, bit(1):enable 5g, default is set to enable 2.4 GHz for an IOT issue with Buffalo's AP at 5 GHz */
-static int rtw_ampdu_amsdu;/* 0: disabled, 1:enabled, 2:auto */
-
-static int rtw_lowrate_two_xmit = 1;/* Use 2 path Tx to transmit MCS0~7 and legacy mode */
-
-static int rtw_low_power;
-static int rtw_wifi_spec;
-static int rtw_channel_plan = RT_CHANNEL_DOMAIN_MAX;
-static int rtw_AcceptAddbaReq = true;/* 0:Reject AP's Add BA req, 1:Accept AP's Add BA req. */
-
-static int rtw_antdiv_cfg = 2; /* 0:OFF , 1:ON, 2:decide by Efuse config */
-static int rtw_antdiv_type; /* 0:decide by efuse 1: for 88EE, 1Tx and 1RxCG are diversity.(2 Ant with SPDT), 2: for 88EE, 1Tx and 2Rx are diversity.(2 Ant, Tx and RxCG are both on aux port, RxCS is on main port), 3: for 88EE, 1Tx and 1RxCG are fixed.(1Ant, Tx and RxCG are both on aux port) */
-
-
-static int rtw_hwpdn_mode = 2;/* 0:disable, 1:enable, 2: by EFUSE config */
-
-static int rtw_hwpwrp_detect; /* HW power ping detect 0:disable , 1:enable */
-
-static int rtw_hw_wps_pbc = 1;
-
-int rtw_mc2u_disable;
-
-static int rtw_80211d;
-
-static char *ifname = "wlan%d";
-module_param(ifname, charp, 0644);
-MODULE_PARM_DESC(ifname, "The default name to allocate for first interface");
-
-static char *if2name = "wlan%d";
-module_param(if2name, charp, 0644);
-MODULE_PARM_DESC(if2name, "The default name to allocate for second interface");
-
-char *rtw_initmac; /* temp mac address if users want to use instead of the mac address in Efuse */
-
-module_param(rtw_initmac, charp, 0644);
-module_param(rtw_channel_plan, int, 0644);
-module_param(rtw_rfintfs, int, 0644);
-module_param(rtw_lbkmode, int, 0644);
-module_param(rtw_network_mode, int, 0644);
-module_param(rtw_channel, int, 0644);
-module_param(rtw_wmm_enable, int, 0644);
-module_param(rtw_vrtl_carrier_sense, int, 0644);
-module_param(rtw_vcs_type, int, 0644);
-module_param(rtw_busy_thresh, int, 0644);
-module_param(rtw_led_enable, int, 0644);
-module_param(rtw_ht_enable, int, 0644);
-module_param(rtw_cbw40_enable, int, 0644);
-module_param(rtw_ampdu_enable, int, 0644);
-module_param(rtw_rx_stbc, int, 0644);
-module_param(rtw_ampdu_amsdu, int, 0644);
-module_param(rtw_lowrate_two_xmit, int, 0644);
-module_param(rtw_power_mgnt, int, 0644);
-module_param(rtw_smart_ps, int, 0644);
-module_param(rtw_low_power, int, 0644);
-module_param(rtw_wifi_spec, int, 0644);
-module_param(rtw_antdiv_cfg, int, 0644);
-module_param(rtw_antdiv_type, int, 0644);
-module_param(rtw_hwpdn_mode, int, 0644);
-module_param(rtw_hwpwrp_detect, int, 0644);
-module_param(rtw_hw_wps_pbc, int, 0644);
-
-static uint rtw_max_roaming_times = 2;
-module_param(rtw_max_roaming_times, uint, 0644);
-MODULE_PARM_DESC(rtw_max_roaming_times, "The max roaming times to try");
-
-static int rtw_fw_iol = 1;/* 0:Disable, 1:enable, 2:by usb speed */
-module_param(rtw_fw_iol, int, 0644);
-MODULE_PARM_DESC(rtw_fw_iol, "FW IOL");
-
-module_param(rtw_mc2u_disable, int, 0644);
-
-module_param(rtw_80211d, int, 0644);
-MODULE_PARM_DESC(rtw_80211d, "Enable 802.11d mechanism");
-
-static uint rtw_notch_filter = RTW_NOTCH_FILTER;
-module_param(rtw_notch_filter, uint, 0644);
-MODULE_PARM_DESC(rtw_notch_filter, "0:Disable, 1:Enable, 2:Enable only for P2P");
-
-static uint loadparam(struct adapter *padapter)
-{
- struct registry_priv *registry_par = &padapter->registrypriv;
-
- registry_par->rfintfs = (u8)rtw_rfintfs;
- registry_par->lbkmode = (u8)rtw_lbkmode;
- registry_par->network_mode = (u8)rtw_network_mode;
-
- memcpy(registry_par->ssid.Ssid, "ANY", 3);
- registry_par->ssid.SsidLength = 3;
-
- registry_par->channel = (u8)rtw_channel;
- registry_par->wireless_mode = (u8)rtw_wireless_mode;
- registry_par->vrtl_carrier_sense = (u8)rtw_vrtl_carrier_sense;
- registry_par->vcs_type = (u8)rtw_vcs_type;
- registry_par->rts_thresh = (u16)rtw_rts_thresh;
- registry_par->frag_thresh = (u16)rtw_frag_thresh;
- registry_par->preamble = (u8)rtw_preamble;
- registry_par->scan_mode = (u8)rtw_scan_mode;
- registry_par->adhoc_tx_pwr = (u8)rtw_adhoc_tx_pwr;
- registry_par->soft_ap = (u8)rtw_soft_ap;
- registry_par->smart_ps = (u8)rtw_smart_ps;
- registry_par->power_mgnt = (u8)rtw_power_mgnt;
- registry_par->ips_mode = (u8)rtw_ips_mode;
- registry_par->radio_enable = (u8)rtw_radio_enable;
- registry_par->long_retry_lmt = (u8)rtw_long_retry_lmt;
- registry_par->short_retry_lmt = (u8)rtw_short_retry_lmt;
- registry_par->busy_thresh = (u16)rtw_busy_thresh;
- registry_par->ack_policy = (u8)rtw_ack_policy;
- registry_par->software_encrypt = (u8)rtw_software_encrypt;
- registry_par->software_decrypt = (u8)rtw_software_decrypt;
- registry_par->acm_method = (u8)rtw_acm_method;
-
- /* UAPSD */
- registry_par->wmm_enable = (u8)rtw_wmm_enable;
- registry_par->uapsd_enable = (u8)rtw_uapsd_enable;
- registry_par->uapsd_max_sp = (u8)rtw_uapsd_max_sp;
- registry_par->uapsd_acbk_en = (u8)rtw_uapsd_acbk_en;
- registry_par->uapsd_acbe_en = (u8)rtw_uapsd_acbe_en;
- registry_par->uapsd_acvi_en = (u8)rtw_uapsd_acvi_en;
- registry_par->uapsd_acvo_en = (u8)rtw_uapsd_acvo_en;
-
- registry_par->led_enable = (u8)rtw_led_enable;
-
- registry_par->ht_enable = (u8)rtw_ht_enable;
- registry_par->cbw40_enable = (u8)rtw_cbw40_enable;
- registry_par->ampdu_enable = (u8)rtw_ampdu_enable;
- registry_par->rx_stbc = (u8)rtw_rx_stbc;
- registry_par->ampdu_amsdu = (u8)rtw_ampdu_amsdu;
- registry_par->lowrate_two_xmit = (u8)rtw_lowrate_two_xmit;
- registry_par->low_power = (u8)rtw_low_power;
- registry_par->wifi_spec = (u8)rtw_wifi_spec;
- registry_par->channel_plan = (u8)rtw_channel_plan;
- registry_par->bAcceptAddbaReq = (u8)rtw_AcceptAddbaReq;
- registry_par->antdiv_cfg = (u8)rtw_antdiv_cfg;
- registry_par->antdiv_type = (u8)rtw_antdiv_type;
- registry_par->hwpdn_mode = (u8)rtw_hwpdn_mode;/* 0:disable, 1:enable, 2:by EFUSE config */
- registry_par->hwpwrp_detect = (u8)rtw_hwpwrp_detect;/* 0:disable, 1:enable */
- registry_par->hw_wps_pbc = (u8)rtw_hw_wps_pbc;
-
- registry_par->max_roaming_times = (u8)rtw_max_roaming_times;
-
- registry_par->fw_iol = rtw_fw_iol;
-
- registry_par->enable80211d = (u8)rtw_80211d;
- snprintf(registry_par->ifname, 16, "%s", ifname);
- snprintf(registry_par->if2name, 16, "%s", if2name);
- registry_par->notch_filter = (u8)rtw_notch_filter;
-
- return _SUCCESS;
-}
-
-static int rtw_net_set_mac_address(struct net_device *pnetdev, void *p)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
- struct sockaddr *addr = p;
-
- if (!padapter->bup)
- memcpy(padapter->eeprompriv.mac_addr, addr->sa_data, ETH_ALEN);
-
- return 0;
-}
-
-static struct net_device_stats *rtw_net_get_stats(struct net_device *pnetdev)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct recv_priv *precvpriv = &padapter->recvpriv;
-
- padapter->stats.tx_packets = pxmitpriv->tx_pkts;/* pxmitpriv->tx_pkts++; */
- padapter->stats.rx_packets = precvpriv->rx_pkts;/* precvpriv->rx_pkts++; */
- padapter->stats.tx_dropped = pxmitpriv->tx_drop;
- padapter->stats.rx_dropped = precvpriv->rx_drop;
- padapter->stats.tx_bytes = pxmitpriv->tx_bytes;
- padapter->stats.rx_bytes = precvpriv->rx_bytes;
- return &padapter->stats;
-}
-
-/*
- * AC to queue mapping
- *
- * AC_VO -> queue 0
- * AC_VI -> queue 1
- * AC_BE -> queue 2
- * AC_BK -> queue 3
- */
-static const u16 rtw_1d_to_queue[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
-
-/* Given a data frame determine the 802.1p/1d tag to use. */
-static unsigned int rtw_classify8021d(struct sk_buff *skb)
-{
- unsigned int dscp;
-
- /* skb->priority values from 256->263 are magic values to
- * directly indicate a specific 802.1d priority. This is used
- * to allow 802.1d priority to be passed directly in from VLAN
- * tags, etc.
- */
- if (skb->priority >= 256 && skb->priority <= 263)
- return skb->priority - 256;
-
- switch (skb->protocol) {
- case htons(ETH_P_IP):
- dscp = ip_hdr(skb)->tos & 0xfc;
- break;
- default:
- return 0;
- }
-
- return dscp >> 5;
-}
-
-static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev)
-{
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- skb->priority = rtw_classify8021d(skb);
-
- if (pmlmepriv->acm_mask != 0)
- skb->priority = qos_acm(pmlmepriv->acm_mask, skb->priority);
-
- return rtw_1d_to_queue[skb->priority];
-}
-
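rtw_classify8021d() and the rtw_1d_to_queue[] table above implement the usual two-step WMM mapping: the top three bits of the IPv4 TOS byte give the 802.1d priority, and the priority selects one of four hardware queues. A self-contained sketch of the same mapping (tos_to_queue() is illustrative, not a driver function):

    #include <stdio.h>

    static const unsigned short tid_to_queue[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

    /* Map an IPv4 TOS byte to an 802.1d priority (0..7) and then to a queue. */
    static unsigned int tos_to_queue(unsigned char tos)
    {
            unsigned int dscp = tos & 0xfc;     /* keep the DSCP bits */
            unsigned int prio = dscp >> 5;      /* top three bits = 802.1d priority */

            return tid_to_queue[prio];
    }

    int main(void)
    {
            printf("TOS 0xb8 -> queue %u\n", tos_to_queue(0xb8));  /* prio 5 -> queue 1 (AC_VI) */
            printf("TOS 0x00 -> queue %u\n", tos_to_queue(0x00));  /* prio 0 -> queue 2 (AC_BE) */
            return 0;
    }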
-u16 rtw_recv_select_queue(struct sk_buff *skb)
-{
- struct iphdr *piphdr;
- unsigned int dscp;
- __be16 eth_type;
- u32 priority;
- u8 *pdata = skb->data;
-
- memcpy(&eth_type, pdata + (ETH_ALEN << 1), 2);
-
- switch (eth_type) {
- case htons(ETH_P_IP):
- piphdr = (struct iphdr *)(pdata + ETH_HLEN);
- dscp = piphdr->tos & 0xfc;
- priority = dscp >> 5;
- break;
- default:
- priority = 0;
- }
-
- return rtw_1d_to_queue[priority];
-}
-
-static const struct net_device_ops rtw_netdev_ops = {
- .ndo_open = netdev_open,
- .ndo_stop = netdev_close,
- .ndo_start_xmit = rtw_xmit_entry,
- .ndo_select_queue = rtw_select_queue,
- .ndo_set_mac_address = rtw_net_set_mac_address,
- .ndo_get_stats = rtw_net_get_stats,
-};
-
-int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname)
-{
- int err;
-
- err = dev_alloc_name(pnetdev, ifname);
- if (err < 0)
- return err;
-
- netif_carrier_off(pnetdev);
- return 0;
-}
-
-static const struct device_type wlan_type = {
- .name = "wlan",
-};
-
-struct net_device *rtw_init_netdev(struct adapter *old_padapter)
-{
- struct adapter *padapter;
- struct net_device *pnetdev;
-
- if (old_padapter)
- pnetdev = rtw_alloc_etherdev_with_old_priv(sizeof(struct adapter), (void *)old_padapter);
- else
- pnetdev = rtw_alloc_etherdev(sizeof(struct adapter));
-
- if (!pnetdev)
- return NULL;
-
- pnetdev->dev.type = &wlan_type;
- padapter = rtw_netdev_priv(pnetdev);
- padapter->pnetdev = pnetdev;
- pnetdev->netdev_ops = &rtw_netdev_ops;
- pnetdev->watchdog_timeo = HZ * 3; /* 3 second timeout */
- pnetdev->wireless_handlers = (struct iw_handler_def *)&rtw_handlers_def;
-
- /* step 2. */
- loadparam(padapter);
-
- return pnetdev;
-}
-
-int rtw_start_drv_threads(struct adapter *padapter)
-{
- padapter->cmdThread = kthread_run(rtw_cmd_thread, padapter, "RTW_CMD_THREAD");
- if (IS_ERR(padapter->cmdThread))
- return PTR_ERR(padapter->cmdThread);
-
- /* wait for rtw_cmd_thread() to start running */
- wait_for_completion(&padapter->cmdpriv.start_cmd_thread);
-
- return 0;
-}
-
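rtw_start_drv_threads() and rtw_stop_drv_threads() above use the common kthread-plus-completion handshake: the creator blocks until the new thread signals that it is running, and teardown later blocks until the thread signals that it has stopped. A minimal kernel-style sketch of the startup half under the same pattern (example_thread, example_start_thread and start_done are hypothetical names, not part of this driver):

    #include <linux/kthread.h>
    #include <linux/completion.h>
    #include <linux/delay.h>
    #include <linux/err.h>

    static struct completion start_done;

    static int example_thread(void *data)
    {
            complete(&start_done);              /* tell the creator we are running */
            while (!kthread_should_stop())
                    msleep(20);                 /* placeholder for real work */
            return 0;
    }

    static struct task_struct *example_start_thread(void)
    {
            struct task_struct *task;

            init_completion(&start_done);
            task = kthread_run(example_thread, NULL, "example_thread");
            if (!IS_ERR(task))
                    wait_for_completion(&start_done);
            return task;
    }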
-void rtw_stop_drv_threads(struct adapter *padapter)
-{
- /* Below is to terminate rtw_cmd_thread & event_thread... */
- complete(&padapter->cmdpriv.enqueue_cmd);
- if (padapter->cmdThread)
- /* wait for rtw_cmd_thread() to stop running */
- wait_for_completion(&padapter->cmdpriv.stop_cmd_thread);
-}
-
-static void rtw_init_default_value(struct adapter *padapter)
-{
- struct registry_priv *pregistrypriv = &padapter->registrypriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
-
- /* xmit_priv */
- pxmitpriv->frag_len = pregistrypriv->frag_thresh;
-
- /* mlme_priv */
- pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
- pmlmepriv->scan_mode = SCAN_ACTIVE;
-
- /* ht_priv */
- pmlmepriv->htpriv.ampdu_enable = false;/* set to disabled */
-
- /* security_priv */
- psecuritypriv->binstallGrpkey = false;
- psecuritypriv->sw_encrypt = pregistrypriv->software_encrypt;
- psecuritypriv->sw_decrypt = pregistrypriv->software_decrypt;
- psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
- psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
- psecuritypriv->dot11PrivacyKeyIndex = 0;
- psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
- psecuritypriv->dot118021XGrpKeyid = 1;
- psecuritypriv->ndisauthtype = Ndis802_11AuthModeOpen;
- psecuritypriv->ndisencryptstatus = Ndis802_11WEPDisabled;
-
- /* registry_priv */
- rtw_init_registrypriv_dev_network(padapter);
- rtw_update_registrypriv_dev_network(padapter);
-
- /* hal_priv */
- rtl8188eu_init_default_value(padapter);
-
- /* misc. */
- padapter->bReadPortCancel = false;
- padapter->bWritePortCancel = false;
- padapter->bRxRSSIDisplay = 0;
- padapter->bNotifyChannelChange = 0;
- padapter->bShowGetP2PState = 1;
-}
-
-void rtw_reset_drv_sw(struct adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- /* hal_priv */
- rtl8188eu_init_default_value(padapter);
- padapter->bReadPortCancel = false;
- padapter->bWritePortCancel = false;
- padapter->bRxRSSIDisplay = 0;
- pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
-
- padapter->xmitpriv.tx_pkts = 0;
- padapter->recvpriv.rx_pkts = 0;
-
- pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
-
- _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING);
-
- /* mlmeextpriv */
- padapter->mlmeextpriv.sitesurvey_res.state = SCAN_DISABLE;
-
- rtw_set_signal_stat_timer(&padapter->recvpriv);
-}
-
-u8 rtw_init_drv_sw(struct adapter *padapter)
-{
- if (rtw_init_cmd_priv(&padapter->cmdpriv)) {
- dev_err(dvobj_to_dev(padapter->dvobj), "rtw_init_cmd_priv failed\n");
- return _FAIL;
- }
-
- padapter->cmdpriv.padapter = padapter;
-
- if (rtw_init_evt_priv(&padapter->evtpriv)) {
- dev_err(dvobj_to_dev(padapter->dvobj), "rtw_init_evt_priv failed\n");
- goto free_cmd_priv;
- }
-
- if (rtw_init_mlme_priv(padapter)) {
- dev_err(dvobj_to_dev(padapter->dvobj), "rtw_init_mlme_priv failed\n");
- goto free_evt_priv;
- }
-
- rtw_init_wifidirect_timers(padapter);
- init_wifidirect_info(padapter, P2P_ROLE_DISABLE);
- reset_global_wifidirect_info(padapter);
-
- init_mlme_ext_priv(padapter);
-
- if (_rtw_init_xmit_priv(&padapter->xmitpriv, padapter)) {
- dev_err(dvobj_to_dev(padapter->dvobj), "_rtw_init_xmit_priv failed\n");
- goto free_mlme_ext;
- }
-
- if (_rtw_init_recv_priv(&padapter->recvpriv, padapter)) {
- dev_err(dvobj_to_dev(padapter->dvobj), "_rtw_init_recv_priv failed\n");
- goto free_xmit_priv;
- }
-
- if (_rtw_init_sta_priv(&padapter->stapriv)) {
- dev_err(dvobj_to_dev(padapter->dvobj), "_rtw_init_sta_priv failed\n");
- goto free_recv_priv;
- }
-
- padapter->stapriv.padapter = padapter;
-
- rtw_init_bcmc_stainfo(padapter);
-
- rtw_init_pwrctrl_priv(padapter);
-
- rtw_init_default_value(padapter);
-
- rtl8188e_init_dm_priv(padapter);
- rtl8188eu_InitSwLeds(padapter);
-
- spin_lock_init(&padapter->br_ext_lock);
-
- return _SUCCESS;
-
-free_recv_priv:
- _rtw_free_recv_priv(&padapter->recvpriv);
-
-free_xmit_priv:
- _rtw_free_xmit_priv(&padapter->xmitpriv);
-
-free_mlme_ext:
- free_mlme_ext_priv(&padapter->mlmeextpriv);
-
- rtw_free_mlme_priv(&padapter->mlmepriv);
-
-free_evt_priv:
- rtw_free_evt_priv(&padapter->evtpriv);
-
-free_cmd_priv:
- rtw_free_cmd_priv(&padapter->cmdpriv);
-
- return _FAIL;
-}
-
-void rtw_cancel_all_timer(struct adapter *padapter)
-{
- _cancel_timer_ex(&padapter->mlmepriv.assoc_timer);
-
- _cancel_timer_ex(&padapter->mlmepriv.scan_to_timer);
-
- _cancel_timer_ex(&padapter->mlmepriv.dynamic_chk_timer);
-
- /* cancel sw led timer */
- rtl8188eu_DeInitSwLeds(padapter);
-
- _cancel_timer_ex(&padapter->pwrctrlpriv.pwr_state_check_timer);
-
- _cancel_timer_ex(&padapter->recvpriv.signal_stat_timer);
-}
-
-void rtw_free_drv_sw(struct adapter *padapter)
-{
-	/* we could call rtw_p2p_enable here, but: */
-	/* 1. rtw_p2p_enable may perform I/O */
-	/* 2. rtw_p2p_enable is bundled with the wext interface */
- {
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
- _cancel_timer_ex(&pwdinfo->find_phase_timer);
- _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
- _cancel_timer_ex(&pwdinfo->pre_tx_scan_timer);
- rtw_p2p_set_state(pwdinfo, P2P_STATE_NONE);
- }
- }
-
- free_mlme_ext_priv(&padapter->mlmeextpriv);
-
- rtw_free_cmd_priv(&padapter->cmdpriv);
-
- rtw_free_evt_priv(&padapter->evtpriv);
-
- rtw_free_mlme_priv(&padapter->mlmepriv);
- _rtw_free_xmit_priv(&padapter->xmitpriv);
-
- _rtw_free_sta_priv(&padapter->stapriv); /* will free bcmc_stainfo here */
-
- _rtw_free_recv_priv(&padapter->recvpriv);
-
- /* free the old_pnetdev */
- if (padapter->rereg_nd_name_priv.old_pnetdev) {
- free_netdev(padapter->rereg_nd_name_priv.old_pnetdev);
- padapter->rereg_nd_name_priv.old_pnetdev = NULL;
- }
-
-	/* clear the buddy adapter's pbuddy_adapter to avoid accessing a dangling pointer. */
- if (padapter->pbuddy_adapter)
- padapter->pbuddy_adapter->pbuddy_adapter = NULL;
-}
-
-void netdev_br_init(struct net_device *netdev)
-{
- struct adapter *adapter = (struct adapter *)rtw_netdev_priv(netdev);
-
- rcu_read_lock();
-
- if (rcu_dereference(adapter->pnetdev->rx_handler_data)) {
- struct net_device *br_netdev;
- struct net *devnet = NULL;
-
- devnet = dev_net(netdev);
- br_netdev = dev_get_by_name(devnet, CONFIG_BR_EXT_BRNAME);
- if (br_netdev) {
- memcpy(adapter->br_mac, br_netdev->dev_addr, ETH_ALEN);
- dev_put(br_netdev);
- } else {
- pr_info("%s()-%d: dev_get_by_name(%s) failed!",
- __func__, __LINE__, CONFIG_BR_EXT_BRNAME);
- }
- }
- adapter->ethBrExtInfo.addPPPoETag = 1;
-
- rcu_read_unlock();
-}
-
-static int _netdev_open(struct net_device *pnetdev)
-{
- uint status;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
-
- if (!padapter->bup) {
- padapter->bDriverStopped = false;
- padapter->bSurpriseRemoved = false;
-
- status = rtw_hal_init(padapter);
- if (status == _FAIL)
- goto netdev_open_error;
-
- netdev_dbg(pnetdev, "MAC Address = %pM\n", pnetdev->dev_addr);
-
- if (rtw_start_drv_threads(padapter)) {
- pr_info("Initialize driver software resource Failed!\n");
- goto netdev_open_error;
- }
-
- if (init_hw_mlme_ext(padapter) == _FAIL) {
- pr_info("can't init mlme_ext_priv\n");
- goto netdev_open_error;
- }
- if (rtl8188eu_inirp_init(padapter))
- goto netdev_open_error;
-
- rtw_led_control(padapter, LED_CTL_NO_LINK);
-
- padapter->bup = true;
- }
- padapter->net_closed = false;
-
- _set_timer(&padapter->mlmepriv.dynamic_chk_timer, 2000);
-
- padapter->pwrctrlpriv.bips_processing = false;
- rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
-
- if (!rtw_netif_queue_stopped(pnetdev))
- netif_tx_start_all_queues(pnetdev);
- else
- netif_tx_wake_all_queues(pnetdev);
-
- netdev_br_init(pnetdev);
-
- return 0;
-
-netdev_open_error:
- padapter->bup = false;
- netif_carrier_off(pnetdev);
- netif_tx_stop_all_queues(pnetdev);
- return -1;
-}
-
-int netdev_open(struct net_device *pnetdev)
-{
- int ret;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
-
- mutex_lock(padapter->hw_init_mutex);
- ret = _netdev_open(pnetdev);
- mutex_unlock(padapter->hw_init_mutex);
- return ret;
-}
-
-static int ips_netdrv_open(struct adapter *padapter)
-{
- int status = _SUCCESS;
- padapter->net_closed = false;
-
- padapter->bDriverStopped = false;
- padapter->bSurpriseRemoved = false;
-
- status = rtw_hal_init(padapter);
- if (status == _FAIL)
- goto netdev_open_error;
-
- if (rtl8188eu_inirp_init(padapter))
- goto netdev_open_error;
-
- rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
- _set_timer(&padapter->mlmepriv.dynamic_chk_timer, 5000);
-
- return _SUCCESS;
-
-netdev_open_error:
- return _FAIL;
-}
-
-int rtw_ips_pwr_up(struct adapter *padapter)
-{
- int result;
- rtw_reset_drv_sw(padapter);
-
- result = ips_netdrv_open(padapter);
-
- rtw_led_control(padapter, LED_CTL_NO_LINK);
-
- return result;
-}
-
-void rtw_ips_pwr_down(struct adapter *padapter)
-{
- padapter->net_closed = true;
-
- rtw_led_control(padapter, LED_CTL_POWER_OFF);
-
- rtw_ips_dev_unload(padapter);
-}
-
-static void rtw_fifo_cleanup(struct adapter *adapter)
-{
- struct pwrctrl_priv *pwrpriv = &adapter->pwrctrlpriv;
- u8 trycnt = 100;
- int res;
- u32 reg;
-
- /* pause tx */
- rtw_write8(adapter, REG_TXPAUSE, 0xff);
-
- /* keep sn */
- /* FIXME: return an error to caller */
- res = rtw_read16(adapter, REG_NQOS_SEQ, &adapter->xmitpriv.nqos_ssn);
- if (res)
- return;
-
- if (!pwrpriv->bkeepfwalive) {
- /* RX DMA stop */
- res = rtw_read32(adapter, REG_RXPKT_NUM, &reg);
- if (res)
- return;
-
- rtw_write32(adapter, REG_RXPKT_NUM,
- (reg | RW_RELEASE_EN));
- do {
- res = rtw_read32(adapter, REG_RXPKT_NUM, &reg);
- if (res)
- continue;
-
- if (!(reg & RXDMA_IDLE))
- break;
- } while (trycnt--);
-
- /* RQPN Load 0 */
- rtw_write16(adapter, REG_RQPN_NPQ, 0x0);
- rtw_write32(adapter, REG_RQPN, 0x80000000);
- mdelay(10);
- }
-}
-
-void rtw_ips_dev_unload(struct adapter *padapter)
-{
- rtw_fifo_cleanup(padapter);
-
- rtw_read_port_cancel(padapter);
- rtw_write_port_cancel(padapter);
-
- /* s5. */
- if (!padapter->bSurpriseRemoved)
- rtw_hal_deinit(padapter);
-}
-
-int netdev_close(struct net_device *pnetdev)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
- struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
-
- padapter->net_closed = true;
-
- if (padapter->pwrctrlpriv.rf_pwrstate == rf_on) {
- /* s1. */
- if (pnetdev) {
- if (!rtw_netif_queue_stopped(pnetdev))
- netif_tx_stop_all_queues(pnetdev);
- }
-
- /* s2. */
- LeaveAllPowerSaveMode(padapter);
- rtw_disassoc_cmd(padapter, 500, false);
- /* s2-2. indicate disconnect to os */
- rtw_indicate_disconnect(padapter);
- /* s2-3. */
- rtw_free_assoc_resources(padapter, 1);
- /* s2-4. */
- rtw_free_network_queue(padapter, true);
- /* Close LED */
- rtw_led_control(padapter, LED_CTL_POWER_OFF);
- }
-
- nat25_db_cleanup(padapter);
-
- rtw_p2p_enable(padapter, P2P_ROLE_DISABLE);
-
- kfree(dvobj->firmware.data);
- dvobj->firmware.data = NULL;
-
- return 0;
-}
diff --git a/drivers/staging/r8188eu/os_dep/osdep_service.c b/drivers/staging/r8188eu/os_dep/osdep_service.c
deleted file mode 100644
index 88271f956b52..000000000000
--- a/drivers/staging/r8188eu/os_dep/osdep_service.c
+++ /dev/null
@@ -1,227 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#define _OSDEP_SERVICE_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/rtw_ioctl_set.h"
-
-/*
- * Translate the OS dependent @param error_code to an OS independent RTW_STATUS_CODE
- * @return: one of RTW_STATUS_CODE
- */
-inline int RTW_STATUS_CODE(int error_code)
-{
- if (error_code >= 0)
- return _SUCCESS;
- return _FAIL;
-}
-
-void *rtw_malloc2d(int h, int w, int size)
-{
- int j;
-
- void **a = kzalloc(h * sizeof(void *) + h * w * size, GFP_KERNEL);
- if (!a)
- return NULL;
-
- for (j = 0; j < h; j++)
- a[j] = ((char *)(a + h)) + j * w * size;
-
- return a;
-}
-
-/*
- * For the following list_xxx operations, the caller must guarantee
- * atomic context; otherwise there will be a race condition.
- */
-/*
- * Caller must check if the list is empty before calling rtw_list_delete.
- */
-
-static const struct device_type wlan_type = {
- .name = "wlan",
-};
-
-struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv,
- void *old_priv)
-{
- struct net_device *pnetdev;
- struct rtw_netdev_priv_indicator *pnpi;
-
- pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
- if (!pnetdev)
- return NULL;
-
- pnetdev->dev.type = &wlan_type;
- pnpi = netdev_priv(pnetdev);
- pnpi->priv = old_priv;
- pnpi->sizeof_priv = sizeof_priv;
-
- return pnetdev;
-}
-
-struct net_device *rtw_alloc_etherdev(int sizeof_priv)
-{
- struct net_device *pnetdev;
- struct rtw_netdev_priv_indicator *pnpi;
-
- pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
- if (!pnetdev)
- return NULL;
-
- pnpi = netdev_priv(pnetdev);
-
- pnpi->priv = vzalloc(sizeof_priv);
- if (!pnpi->priv) {
- free_netdev(pnetdev);
- return NULL;
- }
-
- pnpi->sizeof_priv = sizeof_priv;
-
- return pnetdev;
-}
-
-void rtw_free_netdev(struct net_device *netdev)
-{
- struct rtw_netdev_priv_indicator *pnpi;
-
- pnpi = netdev_priv(netdev);
-
- vfree(pnpi->priv);
- free_netdev(netdev);
-}
-
-int rtw_change_ifname(struct adapter *padapter, const char *ifname)
-{
- struct net_device *pnetdev;
- struct net_device *cur_pnetdev;
- struct rereg_nd_name_data *rereg_priv;
- int ret;
-
- if (!padapter)
- goto error;
-
- cur_pnetdev = padapter->pnetdev;
- rereg_priv = &padapter->rereg_nd_name_priv;
-
- /* free the old_pnetdev */
- if (rereg_priv->old_pnetdev) {
- free_netdev(rereg_priv->old_pnetdev);
- rereg_priv->old_pnetdev = NULL;
- }
-
- if (!rtnl_is_locked())
- unregister_netdev(cur_pnetdev);
- else
- unregister_netdevice(cur_pnetdev);
-
- rereg_priv->old_pnetdev = cur_pnetdev;
-
- pnetdev = rtw_init_netdev(padapter);
- if (!pnetdev) {
- ret = -1;
- goto error;
- }
-
- SET_NETDEV_DEV(pnetdev, dvobj_to_dev(adapter_to_dvobj(padapter)));
-
- rtw_init_netdev_name(pnetdev, ifname);
-
- eth_hw_addr_set(pnetdev, padapter->eeprompriv.mac_addr);
-
- if (!rtnl_is_locked())
- ret = register_netdev(pnetdev);
- else
- ret = register_netdevice(pnetdev);
- if (ret != 0)
- goto error;
-
- return 0;
-error:
- return -1;
-}
-
-void rtw_buf_update(u8 **buf, u32 *buf_len, u8 *src, u32 src_len)
-{
- u32 dup_len = 0;
- u8 *ori = NULL;
- u8 *dup = NULL;
-
- if (!buf || !buf_len)
- return;
-
- if (!src || !src_len)
- goto keep_ori;
-
- /* duplicate src */
- dup = kmalloc(src_len, GFP_ATOMIC);
- if (dup) {
- dup_len = src_len;
- memcpy(dup, src, dup_len);
- }
-
-keep_ori:
- ori = *buf;
-
- /* replace buf with dup */
- *buf_len = 0;
- *buf = dup;
- *buf_len = dup_len;
-
- /* free ori */
- kfree(ori);
-}
-
-/**
- * rtw_cbuf_empty - test if cbuf is empty
- * @cbuf: pointer of struct rtw_cbuf
- *
- * Returns: true if cbuf is empty
- */
-inline bool rtw_cbuf_empty(struct rtw_cbuf *cbuf)
-{
- return cbuf->write == cbuf->read;
-}
-
-/**
- * rtw_cbuf_pop - pop a pointer from cbuf
- * @cbuf: pointer of struct rtw_cbuf
- *
- * Lock free operation, be careful of the use scheme
- * Returns: pointer popped out
- */
-void *rtw_cbuf_pop(struct rtw_cbuf *cbuf)
-{
- void *buf;
- if (rtw_cbuf_empty(cbuf))
- return NULL;
-
- buf = cbuf->bufs[cbuf->read];
- cbuf->read = (cbuf->read + 1) % cbuf->size;
-
- return buf;
-}
-
-/**
- * rtw_cbuf_alloc - allocate a rtw_cbuf with given size and do initialization
- * @size: size of pointer
- *
- * Returns: pointer to struct rtw_cbuf, NULL for allocation failure
- */
-struct rtw_cbuf *rtw_cbuf_alloc(u32 size)
-{
- struct rtw_cbuf *cbuf;
-
- cbuf = kmalloc(struct_size(cbuf, bufs, size), GFP_KERNEL);
-
- if (cbuf) {
- cbuf->write = 0;
- cbuf->read = 0;
- cbuf->size = size;
- }
- return cbuf;
-}
diff --git a/drivers/staging/r8188eu/os_dep/usb_intf.c b/drivers/staging/r8188eu/os_dep/usb_intf.c
deleted file mode 100644
index 74a16d1757ce..000000000000
--- a/drivers/staging/r8188eu/os_dep/usb_intf.c
+++ /dev/null
@@ -1,445 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#include <linux/usb.h>
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/hal_intf.h"
-#include "../include/osdep_intf.h"
-#include "../include/usb_ops.h"
-#include "../include/usb_osintf.h"
-#include "../include/rtw_ioctl.h"
-#include "../include/rtl8188e_hal.h"
-
-int ui_pid[3] = {0, 0, 0};
-
-static int rtw_suspend(struct usb_interface *intf, pm_message_t message);
-static int rtw_resume(struct usb_interface *intf);
-
-static int rtw_drv_init(struct usb_interface *pusb_intf, const struct usb_device_id *pdid);
-static void rtw_dev_remove(struct usb_interface *pusb_intf);
-
-#define USB_VENDER_ID_REALTEK 0x0bda
-
-/* DID_USB_v916_20130116 */
-static struct usb_device_id rtw_usb_id_tbl[] = {
- /*=== Realtek demoboard ===*/
- {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8179)}, /* 8188EUS */
- {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
- {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill USB-N150 Nano */
- /*=== Customer ID ===*/
- /****** 8188EUS ********/
- {USB_DEVICE(0x07B8, 0x8179)}, /* Abocom - Abocom */
- {USB_DEVICE(0x0DF6, 0x0076)}, /* Sitecom N150 v2 */
- {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
- {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
- {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
- {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
- {USB_DEVICE(0x056E, 0x4008)}, /* Elecom WDC-150SU2M */
- {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
- {USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */
- {USB_DEVICE(0x2C4E, 0x0102)}, /* MERCUSYS MW150US v2 */
- {USB_DEVICE(0x0B05, 0x18F0)}, /* ASUS USB-N10 Nano B1 */
- {USB_DEVICE(0x7392, 0xb811)}, /* Edimax EW-7811Un V2 */
- {} /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(usb, rtw_usb_id_tbl);
-
-struct rtw_usb_drv {
- struct usb_driver usbdrv;
- int drv_registered;
- struct mutex hw_init_mutex;
-};
-
-static struct rtw_usb_drv rtl8188e_usb_drv = {
- .usbdrv.name = KBUILD_MODNAME,
- .usbdrv.probe = rtw_drv_init,
- .usbdrv.disconnect = rtw_dev_remove,
- .usbdrv.id_table = rtw_usb_id_tbl,
- .usbdrv.suspend = rtw_suspend,
- .usbdrv.resume = rtw_resume,
- .usbdrv.reset_resume = rtw_resume,
-};
-
-static struct rtw_usb_drv *usb_drv = &rtl8188e_usb_drv;
-
-static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
-{
- int i;
- u8 rt_num_in_pipes = 0;
- struct dvobj_priv *pdvobjpriv;
- struct usb_host_config *phost_conf;
- struct usb_config_descriptor *pconf_desc;
- struct usb_host_interface *phost_iface;
- struct usb_interface_descriptor *piface_desc;
- struct usb_endpoint_descriptor *pendp_desc;
- struct usb_device *pusbd;
-
- pdvobjpriv = kzalloc(sizeof(*pdvobjpriv), GFP_KERNEL);
- if (!pdvobjpriv)
- goto err;
-
- pdvobjpriv->pusbintf = usb_intf;
- pusbd = interface_to_usbdev(usb_intf);
- pdvobjpriv->pusbdev = pusbd;
- usb_set_intfdata(usb_intf, pdvobjpriv);
-
- pdvobjpriv->RtNumOutPipes = 0;
-
- phost_conf = pusbd->actconfig;
- pconf_desc = &phost_conf->desc;
-
- phost_iface = &usb_intf->altsetting[0];
- piface_desc = &phost_iface->desc;
-
- pdvobjpriv->NumInterfaces = pconf_desc->bNumInterfaces;
- pdvobjpriv->InterfaceNumber = piface_desc->bInterfaceNumber;
-
- for (i = 0; i < piface_desc->bNumEndpoints; i++) {
- int ep_num;
- pendp_desc = &phost_iface->endpoint[i].desc;
-
- ep_num = usb_endpoint_num(pendp_desc);
-
- if (usb_endpoint_is_bulk_in(pendp_desc)) {
- pdvobjpriv->RtInPipe = ep_num;
- rt_num_in_pipes++;
- } else if (usb_endpoint_is_bulk_out(pendp_desc)) {
- pdvobjpriv->RtOutPipe[pdvobjpriv->RtNumOutPipes] =
- ep_num;
- pdvobjpriv->RtNumOutPipes++;
- }
- }
-
- if (rt_num_in_pipes != 1)
- goto err;
-
- /* 3 misc */
- rtw_reset_continual_urb_error(pdvobjpriv);
-
- usb_get_dev(pusbd);
- return pdvobjpriv;
-
-err:
- kfree(pdvobjpriv);
- return NULL;
-}
-
-static void usb_dvobj_deinit(struct usb_interface *usb_intf)
-{
- struct dvobj_priv *dvobj = usb_get_intfdata(usb_intf);
-
- usb_set_intfdata(usb_intf, NULL);
- if (dvobj) {
- /* Modify condition for 92DU DMDP 2010.11.18, by Thomas */
- if ((dvobj->NumInterfaces != 2 &&
- dvobj->NumInterfaces != 3) ||
- (dvobj->InterfaceNumber == 1)) {
- if (interface_to_usbdev(usb_intf)->state !=
- USB_STATE_NOTATTACHED)
-				/* If the USB dongle was not unplugged across a
-				 * module remove/insert, the driver fails on the
-				 * first sitesurvey after the device comes up.
-				 * Reset the USB port to work around this. */
- usb_reset_device(interface_to_usbdev(usb_intf));
- }
- kfree(dvobj);
- }
-
- usb_put_dev(interface_to_usbdev(usb_intf));
-
-}
-
-static void rtw_dev_unload(struct adapter *padapter)
-{
- if (padapter->bup) {
- padapter->bDriverStopped = true;
- if (padapter->xmitpriv.ack_tx)
- rtw_ack_tx_done(&padapter->xmitpriv, RTW_SCTX_DONE_DRV_STOP);
- /* s3. */
- rtw_read_port_cancel(padapter);
- rtw_write_port_cancel(padapter);
-
- /* s4. */
- rtw_stop_drv_threads(padapter);
-
- /* s5. */
- if (!padapter->bSurpriseRemoved) {
- rtw_hal_deinit(padapter);
- padapter->bSurpriseRemoved = true;
- }
-
- padapter->bup = false;
- }
-}
-
-static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
-{
- struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
- struct adapter *padapter = dvobj->if1;
- struct net_device *pnetdev = padapter->pnetdev;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-
- if ((!padapter->bup) || (padapter->bDriverStopped) ||
- (padapter->bSurpriseRemoved))
- goto exit;
-
- pwrpriv->bInSuspend = true;
- rtw_cancel_all_timer(padapter);
- LeaveAllPowerSaveMode(padapter);
-
- mutex_lock(&pwrpriv->lock);
- /* s1. */
- if (pnetdev) {
- netif_carrier_off(pnetdev);
- netif_tx_stop_all_queues(pnetdev);
- }
-
- /* s2. */
- rtw_disassoc_cmd(padapter, 0, false);
-
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) &&
- check_fwstate(pmlmepriv, _FW_LINKED))
- pmlmepriv->to_roaming = 1;
- /* s2-2. indicate disconnect to os */
- rtw_indicate_disconnect(padapter);
- /* s2-3. */
- rtw_free_assoc_resources(padapter, 1);
- /* s2-4. */
- rtw_free_network_queue(padapter, true);
-
- rtw_dev_unload(padapter);
- mutex_unlock(&pwrpriv->lock);
-
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
- rtw_indicate_scan_done(padapter);
-
- if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
- rtw_indicate_disconnect(padapter);
-
-exit:
- return 0;
-}
-
-static int rtw_resume(struct usb_interface *pusb_intf)
-{
- struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
- struct adapter *padapter = dvobj->if1;
- struct net_device *pnetdev;
- struct pwrctrl_priv *pwrpriv = NULL;
- int ret = -1;
-
- pnetdev = padapter->pnetdev;
- pwrpriv = &padapter->pwrctrlpriv;
-
- mutex_lock(&pwrpriv->lock);
- rtw_reset_drv_sw(padapter);
- if (pwrpriv)
- pwrpriv->bkeepfwalive = false;
-
- if (netdev_open(pnetdev) != 0) {
- mutex_unlock(&pwrpriv->lock);
- goto exit;
- }
-
- netif_device_attach(pnetdev);
- netif_carrier_on(pnetdev);
-
- mutex_unlock(&pwrpriv->lock);
-
- if (padapter->pid[1] != 0)
- rtw_signal_process(padapter->pid[1], SIGUSR2);
-
- rtw_roaming(padapter, NULL);
-
- ret = 0;
-exit:
- if (pwrpriv)
- pwrpriv->bInSuspend = false;
-
- return ret;
-}
-
-/*
- * drv_init() - a device potentially for us
- *
- * notes: drv_init() is called when the bus driver has located
- * a card for us to support.
- * We accept the new device by returning 0.
- */
-
-static int rtw_usb_if1_init(struct dvobj_priv *dvobj, struct usb_interface *pusb_intf)
-{
- struct adapter *padapter = NULL;
- struct net_device *pnetdev = NULL;
- int ret;
-
- padapter = vzalloc(sizeof(*padapter));
- if (!padapter)
- return -ENOMEM;
-
- padapter->dvobj = dvobj;
- dvobj->if1 = padapter;
-
- padapter->bDriverStopped = true;
-
- padapter->hw_init_mutex = &usb_drv->hw_init_mutex;
-
- rtw_handle_dualmac(padapter, 1);
-
- pnetdev = rtw_init_netdev(padapter);
- if (!pnetdev) {
- ret = -ENODEV;
- goto handle_dualmac;
- }
- SET_NETDEV_DEV(pnetdev, dvobj_to_dev(dvobj));
- padapter = rtw_netdev_priv(pnetdev);
-
- /* step read_chip_version */
- rtl8188e_read_chip_version(padapter);
-
- /* step usb endpoint mapping */
- ret = rtl8188eu_interface_configure(padapter);
- if (ret)
- goto handle_dualmac;
-
- /* step read efuse/eeprom data and get mac_addr */
- ret = ReadAdapterInfo8188EU(padapter);
- if (ret)
- goto handle_dualmac;
-
- /* step 5. */
- if (rtw_init_drv_sw(padapter) == _FAIL) {
- ret = -ENODEV;
- goto handle_dualmac;
- }
-
-#ifdef CONFIG_PM
- if (padapter->pwrctrlpriv.bSupportRemoteWakeup) {
- dvobj->pusbdev->do_remote_wakeup = 1;
- pusb_intf->needs_remote_wakeup = 1;
- device_init_wakeup(&pusb_intf->dev, 1);
- }
-#endif
-
-	/* 2012-07-11: moved here so that 8723AS-VAU BT autosuspend
-	 * does not interfere */
- usb_autopm_get_interface(pusb_intf);
-
-	/* alloc dev name after reading the efuse. */
- ret = rtw_init_netdev_name(pnetdev, padapter->registrypriv.ifname);
- if (ret)
- goto free_drv_sw;
- rtw_macaddr_cfg(padapter->eeprompriv.mac_addr);
- rtw_init_wifidirect_addrs(padapter, padapter->eeprompriv.mac_addr,
- padapter->eeprompriv.mac_addr);
- eth_hw_addr_set(pnetdev, padapter->eeprompriv.mac_addr);
-
- /* step 6. Tell the network stack we exist */
- ret = register_netdev(pnetdev);
- if (ret)
- goto free_drv_sw;
-
- return 0;
-
-free_drv_sw:
- rtw_cancel_all_timer(padapter);
- rtw_free_drv_sw(padapter);
-handle_dualmac:
- rtw_handle_dualmac(padapter, 0);
- if (pnetdev)
- rtw_free_netdev(pnetdev);
- else
- vfree(padapter);
-
- return ret;
-}
-
-static void rtw_usb_if1_deinit(struct adapter *if1)
-{
- struct net_device *pnetdev = if1->pnetdev;
- struct mlme_priv *pmlmepriv = &if1->mlmepriv;
-
- if (check_fwstate(pmlmepriv, _FW_LINKED))
- rtw_disassoc_cmd(if1, 0, false);
-
- free_mlme_ap_info(if1);
-
- if (pnetdev) {
- /* will call netdev_close() */
- unregister_netdev(pnetdev);
- }
- rtw_cancel_all_timer(if1);
-
- rtw_dev_unload(if1);
- rtw_handle_dualmac(if1, 0);
- rtw_free_drv_sw(if1);
- if (pnetdev)
- rtw_free_netdev(pnetdev);
-}
-
-static int rtw_drv_init(struct usb_interface *pusb_intf, const struct usb_device_id *pdid)
-{
- struct dvobj_priv *dvobj;
- int ret;
-
- /* Initialize dvobj_priv */
- dvobj = usb_dvobj_init(pusb_intf);
- if (!dvobj)
- return -ENODEV;
-
- ret = rtw_usb_if1_init(dvobj, pusb_intf);
- if (ret) {
- usb_dvobj_deinit(pusb_intf);
- return ret;
- }
-
- if (ui_pid[1] != 0)
- rtw_signal_process(ui_pid[1], SIGUSR2);
-
- return 0;
-}
-
-/*
- * dev_remove() - our device is being removed
- *
- * Both rmmod and unplug (SurpriseRemoved) end up calling r871xu_dev_remove();
- * the handler must cope with either case.
- */
-static void rtw_dev_remove(struct usb_interface *pusb_intf)
-{
- struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
- struct adapter *padapter = dvobj->if1;
-
- if (usb_drv->drv_registered)
- padapter->bSurpriseRemoved = true;
-
- rtw_pm_set_ips(padapter, IPS_NONE);
- rtw_pm_set_lps(padapter, PS_MODE_ACTIVE);
-
- LeaveAllPowerSaveMode(padapter);
-
- rtw_usb_if1_deinit(padapter);
-
- usb_dvobj_deinit(pusb_intf);
-}
-
-static int __init rtw_drv_entry(void)
-{
- mutex_init(&usb_drv->hw_init_mutex);
-
- usb_drv->drv_registered = true;
- return usb_register(&usb_drv->usbdrv);
-}
-
-static void __exit rtw_drv_halt(void)
-{
- usb_drv->drv_registered = false;
- usb_deregister(&usb_drv->usbdrv);
-
- mutex_destroy(&usb_drv->hw_init_mutex);
-}
-
-module_init(rtw_drv_entry);
-module_exit(rtw_drv_halt);
diff --git a/drivers/staging/r8188eu/os_dep/usb_ops_linux.c b/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
deleted file mode 100644
index ca09f7ed7e4d..000000000000
--- a/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
+++ /dev/null
@@ -1,136 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#define _USB_OPS_LINUX_C_
-
-#include "../include/drv_types.h"
-#include "../include/rtl8188e_recv.h"
-
-static unsigned int ffaddr2pipehdl(struct dvobj_priv *pdvobj, u32 addr)
-{
- unsigned int pipe = 0, ep_num = 0;
- struct usb_device *pusbd = pdvobj->pusbdev;
-
- if (addr < HW_QUEUE_ENTRY) {
- ep_num = pdvobj->Queue2Pipe[addr];
- pipe = usb_sndbulkpipe(pusbd, ep_num);
- }
-
- return pipe;
-}
-
-void rtw_read_port_cancel(struct adapter *padapter)
-{
- int i;
- struct recv_buf *precvbuf = (struct recv_buf *)padapter->recvpriv.precv_buf;
-
- padapter->bReadPortCancel = true;
-
- for (i = 0; i < NR_RECVBUFF; i++) {
- precvbuf->reuse = true;
- usb_kill_urb(precvbuf->purb);
- precvbuf++;
- }
-}
-
-static void usb_write_port_complete(struct urb *purb)
-{
- struct xmit_buf *pxmitbuf = (struct xmit_buf *)purb->context;
- struct adapter *padapter = pxmitbuf->padapter;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- if (pxmitbuf->high_queue)
- rtw_chk_hi_queue_cmd(padapter);
-
- switch (purb->status) {
- case 0:
- case -EINPROGRESS:
- case -ENOENT:
- case -ECONNRESET:
- case -EPIPE:
- case -EPROTO:
- break;
- case -ESHUTDOWN:
- padapter->bDriverStopped = true;
- break;
- default:
- padapter->bSurpriseRemoved = true;
- break;
- }
-
- rtw_sctx_done_err(&pxmitbuf->sctx,
- purb->status ? RTW_SCTX_DONE_WRITE_PORT_ERR : RTW_SCTX_DONE_SUCCESS);
- rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
- tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
-}
-
-u32 rtw_write_port(struct adapter *padapter, u32 addr, u32 cnt, u8 *wmem)
-{
- unsigned long irqL;
- unsigned int pipe;
- int status;
- u32 ret = _FAIL;
- struct urb *purb = NULL;
- struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct xmit_buf *pxmitbuf = (struct xmit_buf *)wmem;
- struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitbuf->priv_data;
- struct usb_device *pusbd = pdvobj->pusbdev;
-
- if (padapter->bDriverStopped || padapter->bSurpriseRemoved) {
- rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_TX_DENY);
- goto exit;
- }
-
- spin_lock_irqsave(&pxmitpriv->lock, irqL);
- pxmitbuf->high_queue = (addr == HIGH_QUEUE_INX);
- spin_unlock_irqrestore(&pxmitpriv->lock, irqL);
-
- purb = pxmitbuf->pxmit_urb;
-
- /* translate DMA FIFO addr to pipehandle */
- pipe = ffaddr2pipehdl(pdvobj, addr);
-
- usb_fill_bulk_urb(purb, pusbd, pipe,
- pxmitframe->buf_addr, /* pxmitbuf->pbuf */
- cnt,
- usb_write_port_complete,
- pxmitbuf);/* context is pxmitbuf */
-
- status = usb_submit_urb(purb, GFP_ATOMIC);
- if (status) {
- rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_WRITE_PORT_ERR);
- if (status == -ENODEV)
- padapter->bDriverStopped = true;
- goto exit;
- }
-
- ret = _SUCCESS;
-
-/* We add the URB_ZERO_PACKET flag to urb so that the host will send the zero packet automatically. */
-
-exit:
- if (ret != _SUCCESS)
- rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
-
- return ret;
-}
-
-void rtw_write_port_cancel(struct adapter *padapter)
-{
- int i;
- struct xmit_buf *pxmitbuf = (struct xmit_buf *)padapter->xmitpriv.pxmitbuf;
-
- padapter->bWritePortCancel = true;
-
- for (i = 0; i < NR_XMITBUFF; i++) {
- usb_kill_urb(pxmitbuf->pxmit_urb);
- pxmitbuf++;
- }
-
- pxmitbuf = (struct xmit_buf *)padapter->xmitpriv.pxmit_extbuf;
- for (i = 0; i < NR_XMIT_EXTBUFF; i++) {
- usb_kill_urb(pxmitbuf->pxmit_urb);
- pxmitbuf++;
- }
-}
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index d8455b23e555..d8408acfc763 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -185,7 +185,6 @@ static void _rtl92e_dm_init_fsync(struct net_device *dev);
static void _rtl92e_dm_deinit_fsync(struct net_device *dev);
static void _rtl92e_dm_check_txrateandretrycount(struct net_device *dev);
-static void _rtl92e_dm_check_ac_dc_power(struct net_device *dev);
static void _rtl92e_dm_check_fsync(struct net_device *dev);
static void _rtl92e_dm_check_rf_ctrl_gpio(void *data);
static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t);
@@ -236,8 +235,6 @@ void rtl92e_dm_watchdog(struct net_device *dev)
if (priv->being_init_adapter)
return;
- _rtl92e_dm_check_ac_dc_power(dev);
-
_rtl92e_dm_check_txrateandretrycount(dev);
_rtl92e_dm_check_edca_turbo(dev);
@@ -255,26 +252,6 @@ void rtl92e_dm_watchdog(struct net_device *dev)
_rtl92e_dm_cts_to_self(dev);
}
-static void _rtl92e_dm_check_ac_dc_power(struct net_device *dev)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
- static const char ac_dc_script[] = "/etc/acpi/wireless-rtl-ac-dc-power.sh";
- char *argv[] = {(char *)ac_dc_script, DRV_NAME, NULL};
- static char *envp[] = {"HOME=/",
- "TERM=linux",
- "PATH=/usr/bin:/bin",
- NULL};
-
- if (priv->rst_progress == RESET_TYPE_SILENT)
- return;
- if (priv->rtllib->state != RTLLIB_LINKED)
- return;
- call_usermodehelper(ac_dc_script, argv, envp, UMH_WAIT_PROC);
-
- return;
-};
-
-
void rtl92e_init_adaptive_rate(struct net_device *dev)
{
@@ -1660,10 +1637,6 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data)
u8 tmp1byte;
enum rt_rf_power_state rf_power_state_to_set;
bool bActuallySet = false;
- char *argv[3];
- static const char RadioPowerPath[] = "/etc/acpi/events/RadioPower.sh";
- static char *envp[] = {"HOME=/", "TERM=linux", "PATH=/usr/bin:/bin",
- NULL};
bActuallySet = false;
@@ -1693,14 +1666,6 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data)
mdelay(1000);
priv->hw_rf_off_action = 1;
rtl92e_set_rf_state(dev, rf_power_state_to_set, RF_CHANGE_BY_HW);
- if (priv->hw_radio_off)
- argv[1] = "RFOFF";
- else
- argv[1] = "RFON";
-
- argv[0] = (char *)RadioPowerPath;
- argv[2] = NULL;
- call_usermodehelper(RadioPowerPath, argv, envp, UMH_WAIT_PROC);
}
}
diff --git a/drivers/staging/rtl8723bs/include/rtw_security.h b/drivers/staging/rtl8723bs/include/rtw_security.h
index a68b73858462..7587fa888527 100644
--- a/drivers/staging/rtl8723bs/include/rtw_security.h
+++ b/drivers/staging/rtl8723bs/include/rtw_security.h
@@ -107,13 +107,13 @@ struct security_priv {
u32 dot118021XGrpPrivacy; /* This specify the privacy algthm. used for Grp key */
u32 dot118021XGrpKeyid; /* key id used for Grp Key (tx key index) */
- union Keytype dot118021XGrpKey[BIP_MAX_KEYID]; /* 802.1x Group Key, for inx0 and inx1 */
- union Keytype dot118021XGrptxmickey[BIP_MAX_KEYID];
- union Keytype dot118021XGrprxmickey[BIP_MAX_KEYID];
+ union Keytype dot118021XGrpKey[BIP_MAX_KEYID + 1]; /* 802.1x Group Key, for inx0 and inx1 */
+ union Keytype dot118021XGrptxmickey[BIP_MAX_KEYID + 1];
+ union Keytype dot118021XGrprxmickey[BIP_MAX_KEYID + 1];
union pn48 dot11Grptxpn; /* PN48 used for Grp Key xmit. */
union pn48 dot11Grprxpn; /* PN48 used for Grp Key recv. */
u32 dot11wBIPKeyid; /* key id used for BIP Key (tx key index) */
- union Keytype dot11wBIPKey[6]; /* BIP Key, for index4 and index5 */
+ union Keytype dot11wBIPKey[BIP_MAX_KEYID + 1]; /* BIP Key, for index4 and index5 */
union pn48 dot11wBIPtxpn; /* PN48 used for Grp Key xmit. */
union pn48 dot11wBIPrxpn; /* PN48 used for Grp Key recv. */
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 54004f846cf0..84a9f4dd8f95 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -350,7 +350,7 @@ int rtw_cfg80211_check_bss(struct adapter *padapter)
bss = cfg80211_get_bss(padapter->rtw_wdev->wiphy, notify_channel,
pnetwork->mac_address, pnetwork->ssid.ssid,
pnetwork->ssid.ssid_length,
- WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
cfg80211_put_bss(padapter->rtw_wdev->wiphy, bss);
@@ -711,6 +711,7 @@ exit:
static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
{
int ret = 0;
+ u8 max_idx;
u32 wep_key_idx, wep_key_len;
struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -724,26 +725,29 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
goto exit;
}
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
- if (param->u.crypt.idx >= WEP_KEYS
- || param->u.crypt.idx >= BIP_MAX_KEYID) {
- ret = -EINVAL;
- goto exit;
- }
- } else {
- {
+ if (param->sta_addr[0] != 0xff || param->sta_addr[1] != 0xff ||
+ param->sta_addr[2] != 0xff || param->sta_addr[3] != 0xff ||
+ param->sta_addr[4] != 0xff || param->sta_addr[5] != 0xff) {
ret = -EINVAL;
goto exit;
}
+
+ if (strcmp(param->u.crypt.alg, "WEP") == 0)
+ max_idx = WEP_KEYS - 1;
+ else
+ max_idx = BIP_MAX_KEYID;
+
+ if (param->u.crypt.idx > max_idx) {
+ netdev_err(dev, "Error crypt.idx %d > %d\n", param->u.crypt.idx, max_idx);
+ ret = -EINVAL;
+ goto exit;
}
if (strcmp(param->u.crypt.alg, "WEP") == 0) {
wep_key_idx = param->u.crypt.idx;
wep_key_len = param->u.crypt.key_len;
- if ((wep_key_idx >= WEP_KEYS) || (wep_key_len <= 0)) {
+ if (wep_key_len <= 0) {
ret = -EINVAL;
goto exit;
}
@@ -1135,8 +1139,8 @@ void rtw_cfg80211_unlink_bss(struct adapter *padapter, struct wlan_network *pnet
bss = cfg80211_get_bss(wiphy, NULL/*notify_channel*/,
select_network->mac_address, select_network->ssid.ssid,
- select_network->ssid.ssid_length, 0/*WLAN_CAPABILITY_ESS*/,
- 0/*WLAN_CAPABILITY_ESS*/);
+ select_network->ssid.ssid_length, IEEE80211_BSS_TYPE_ANY,
+ IEEE80211_PRIVACY_ANY);
if (bss) {
cfg80211_unlink_bss(wiphy, bss);
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index 30374a820496..40a3157fb735 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -46,6 +46,7 @@ static int wpa_set_auth_algs(struct net_device *dev, u32 value)
static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
{
int ret = 0;
+ u8 max_idx;
u32 wep_key_idx, wep_key_len, wep_total_len;
struct ndis_802_11_wep *pwep = NULL;
struct adapter *padapter = rtw_netdev_priv(dev);
@@ -60,19 +61,22 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
goto exit;
}
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
- if (param->u.crypt.idx >= WEP_KEYS ||
- param->u.crypt.idx >= BIP_MAX_KEYID) {
- ret = -EINVAL;
- goto exit;
- }
- } else {
- {
- ret = -EINVAL;
- goto exit;
- }
+ if (param->sta_addr[0] != 0xff || param->sta_addr[1] != 0xff ||
+ param->sta_addr[2] != 0xff || param->sta_addr[3] != 0xff ||
+ param->sta_addr[4] != 0xff || param->sta_addr[5] != 0xff) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (strcmp(param->u.crypt.alg, "WEP") == 0)
+ max_idx = WEP_KEYS - 1;
+ else
+ max_idx = BIP_MAX_KEYID;
+
+ if (param->u.crypt.idx > max_idx) {
+ netdev_err(dev, "Error crypt.idx %d > %d\n", param->u.crypt.idx, max_idx);
+ ret = -EINVAL;
+ goto exit;
}
if (strcmp(param->u.crypt.alg, "WEP") == 0) {
@@ -84,9 +88,6 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
wep_key_idx = param->u.crypt.idx;
wep_key_len = param->u.crypt.key_len;
- if (wep_key_idx > WEP_KEYS)
- return -EINVAL;
-
if (wep_key_len > 0) {
wep_key_len = wep_key_len <= 5 ? 5 : 13;
wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, key_material);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 2317fb077db0..557516c642c3 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -1262,18 +1262,20 @@ static struct iscsi_param *iscsi_check_key(
return param;
if (!(param->phase & phase)) {
- pr_err("Key \"%s\" may not be negotiated during ",
- param->name);
+ char *phase_name;
+
switch (phase) {
case PHASE_SECURITY:
- pr_debug("Security phase.\n");
+ phase_name = "Security";
break;
case PHASE_OPERATIONAL:
- pr_debug("Operational phase.\n");
+ phase_name = "Operational";
break;
default:
- pr_debug("Unknown phase.\n");
+ phase_name = "Unknown";
}
+ pr_err("Key \"%s\" may not be negotiated during %s phase.\n",
+ param->name, phase_name);
return NULL;
}
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
index 297dc62bca29..372d64756ed6 100644
--- a/drivers/tee/amdtee/core.c
+++ b/drivers/tee/amdtee/core.c
@@ -267,35 +267,34 @@ int amdtee_open_session(struct tee_context *ctx,
goto out;
}
+ /* Open session with loaded TA */
+ handle_open_session(arg, &session_info, param);
+ if (arg->ret != TEEC_SUCCESS) {
+ pr_err("open_session failed %d\n", arg->ret);
+ handle_unload_ta(ta_handle);
+ kref_put(&sess->refcount, destroy_session);
+ goto out;
+ }
+
/* Find an empty session index for the given TA */
spin_lock(&sess->lock);
i = find_first_zero_bit(sess->sess_mask, TEE_NUM_SESSIONS);
- if (i < TEE_NUM_SESSIONS)
+ if (i < TEE_NUM_SESSIONS) {
+ sess->session_info[i] = session_info;
+ set_session_id(ta_handle, i, &arg->session);
set_bit(i, sess->sess_mask);
+ }
spin_unlock(&sess->lock);
if (i >= TEE_NUM_SESSIONS) {
pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
+ handle_close_session(ta_handle, session_info);
handle_unload_ta(ta_handle);
kref_put(&sess->refcount, destroy_session);
rc = -ENOMEM;
goto out;
}
- /* Open session with loaded TA */
- handle_open_session(arg, &session_info, param);
- if (arg->ret != TEEC_SUCCESS) {
- pr_err("open_session failed %d\n", arg->ret);
- spin_lock(&sess->lock);
- clear_bit(i, sess->sess_mask);
- spin_unlock(&sess->lock);
- handle_unload_ta(ta_handle);
- kref_put(&sess->refcount, destroy_session);
- goto out;
- }
-
- sess->session_info[i] = session_info;
- set_session_id(ta_handle, i, &arg->session);
out:
free_pages((u64)ta, get_order(ta_size));
return rc;
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index 40725cbc6eb0..90526f46c9b1 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -166,7 +166,6 @@ static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp
proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, _temp);
proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 1);
- thermal_zone_device_enable(tzd);
pci_info->stored_thres = temp;
return 0;
@@ -268,6 +267,10 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_
goto err_free_vectors;
}
+ ret = thermal_zone_device_enable(pci_info->tzone);
+ if (ret)
+ goto err_free_vectors;
+
return 0;
err_free_vectors:
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 55679fd86505..566df4522b88 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -613,6 +613,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
struct thermal_instance *pos;
struct thermal_zone_device *pos1;
struct thermal_cooling_device *pos2;
+ bool upper_no_limit;
int result;
if (trip >= tz->num_trips || trip < 0)
@@ -632,7 +633,13 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
/* lower default 0, upper default max_state */
lower = lower == THERMAL_NO_LIMIT ? 0 : lower;
- upper = upper == THERMAL_NO_LIMIT ? cdev->max_state : upper;
+
+ if (upper == THERMAL_NO_LIMIT) {
+ upper = cdev->max_state;
+ upper_no_limit = true;
+ } else {
+ upper_no_limit = false;
+ }
if (lower > upper || upper > cdev->max_state)
return -EINVAL;
@@ -644,6 +651,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
dev->cdev = cdev;
dev->trip = trip;
dev->upper = upper;
+ dev->upper_no_limit = upper_no_limit;
dev->lower = lower;
dev->target = THERMAL_NO_TARGET;
dev->weight = weight;
@@ -1045,6 +1053,91 @@ devm_thermal_of_cooling_device_register(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_thermal_of_cooling_device_register);
+static bool thermal_cooling_device_present(struct thermal_cooling_device *cdev)
+{
+ struct thermal_cooling_device *pos = NULL;
+
+ list_for_each_entry(pos, &thermal_cdev_list, node) {
+ if (pos == cdev)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * thermal_cooling_device_update - Update a cooling device object
+ * @cdev: Target cooling device.
+ *
+ * Update @cdev to reflect a change of the underlying hardware or platform.
+ *
+ * Must be called when the maximum cooling state of @cdev becomes invalid and so
+ * its .get_max_state() callback needs to be run to produce the new maximum
+ * cooling state value.
+ */
+void thermal_cooling_device_update(struct thermal_cooling_device *cdev)
+{
+ struct thermal_instance *ti;
+ unsigned long state;
+
+ if (IS_ERR_OR_NULL(cdev))
+ return;
+
+ /*
+ * Hold thermal_list_lock throughout the update to prevent the device
+ * from going away while being updated.
+ */
+ mutex_lock(&thermal_list_lock);
+
+ if (!thermal_cooling_device_present(cdev))
+ goto unlock_list;
+
+ /*
+ * Update under the cdev lock to prevent the state from being set beyond
+ * the new limit concurrently.
+ */
+ mutex_lock(&cdev->lock);
+
+ if (cdev->ops->get_max_state(cdev, &cdev->max_state))
+ goto unlock;
+
+ thermal_cooling_device_stats_reinit(cdev);
+
+ list_for_each_entry(ti, &cdev->thermal_instances, cdev_node) {
+ if (ti->upper == cdev->max_state)
+ continue;
+
+ if (ti->upper < cdev->max_state) {
+ if (ti->upper_no_limit)
+ ti->upper = cdev->max_state;
+
+ continue;
+ }
+
+ ti->upper = cdev->max_state;
+ if (ti->lower > ti->upper)
+ ti->lower = ti->upper;
+
+ if (ti->target == THERMAL_NO_TARGET)
+ continue;
+
+ if (ti->target > ti->upper)
+ ti->target = ti->upper;
+ }
+
+ if (cdev->ops->get_cur_state(cdev, &state) || state > cdev->max_state)
+ goto unlock;
+
+ thermal_cooling_device_stats_update(cdev, state);
+
+unlock:
+ mutex_unlock(&cdev->lock);
+
+unlock_list:
+ mutex_unlock(&thermal_list_lock);
+}
+EXPORT_SYMBOL_GPL(thermal_cooling_device_update);
+
static void __unbind(struct thermal_zone_device *tz, int mask,
struct thermal_cooling_device *cdev)
{
@@ -1067,20 +1160,17 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
int i;
const struct thermal_zone_params *tzp;
struct thermal_zone_device *tz;
- struct thermal_cooling_device *pos = NULL;
if (!cdev)
return;
mutex_lock(&thermal_list_lock);
- list_for_each_entry(pos, &thermal_cdev_list, node)
- if (pos == cdev)
- break;
- if (pos != cdev) {
- /* thermal cooling device not found */
+
+ if (!thermal_cooling_device_present(cdev)) {
mutex_unlock(&thermal_list_lock);
return;
}
+
list_del(&cdev->node);
/* Unbind all thermal zones associated with 'this' cdev */
@@ -1309,7 +1399,7 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
struct thermal_trip trip;
result = thermal_zone_get_trip(tz, count, &trip);
- if (result)
+ if (result || !trip.temperature)
set_bit(count, &tz->trips_disabled);
}
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 7af54382e915..3d4a787c6b28 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -101,6 +101,7 @@ struct thermal_instance {
struct list_head tz_node; /* node in tz->thermal_instances */
struct list_head cdev_node; /* node in cdev->thermal_instances */
unsigned int weight; /* The weight of the cooling device */
+ bool upper_no_limit;
};
#define to_thermal_zone(_dev) \
@@ -127,6 +128,7 @@ int thermal_zone_create_device_groups(struct thermal_zone_device *, int);
void thermal_zone_destroy_device_groups(struct thermal_zone_device *);
void thermal_cooling_device_setup_sysfs(struct thermal_cooling_device *);
void thermal_cooling_device_destroy_sysfs(struct thermal_cooling_device *cdev);
+void thermal_cooling_device_stats_reinit(struct thermal_cooling_device *cdev);
/* used only at binding time */
ssize_t trip_point_show(struct device *, struct device_attribute *, char *);
ssize_t weight_show(struct device *, struct device_attribute *, char *);
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index cef860deaf91..a4aba7b8bb8b 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -685,6 +685,8 @@ void thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
{
struct cooling_dev_stats *stats = cdev->stats;
+ lockdep_assert_held(&cdev->lock);
+
if (!stats)
return;
@@ -706,13 +708,22 @@ static ssize_t total_trans_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct thermal_cooling_device *cdev = to_cooling_device(dev);
- struct cooling_dev_stats *stats = cdev->stats;
- int ret;
+ struct cooling_dev_stats *stats;
+ int ret = 0;
+
+ mutex_lock(&cdev->lock);
+
+ stats = cdev->stats;
+ if (!stats)
+ goto unlock;
spin_lock(&stats->lock);
ret = sprintf(buf, "%u\n", stats->total_trans);
spin_unlock(&stats->lock);
+unlock:
+ mutex_unlock(&cdev->lock);
+
return ret;
}
@@ -721,11 +732,18 @@ time_in_state_ms_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct thermal_cooling_device *cdev = to_cooling_device(dev);
- struct cooling_dev_stats *stats = cdev->stats;
+ struct cooling_dev_stats *stats;
ssize_t len = 0;
int i;
+ mutex_lock(&cdev->lock);
+
+ stats = cdev->stats;
+ if (!stats)
+ goto unlock;
+
spin_lock(&stats->lock);
+
update_time_in_state(stats);
for (i = 0; i <= cdev->max_state; i++) {
@@ -734,6 +752,9 @@ time_in_state_ms_show(struct device *dev, struct device_attribute *attr,
}
spin_unlock(&stats->lock);
+unlock:
+ mutex_unlock(&cdev->lock);
+
return len;
}
@@ -742,8 +763,16 @@ reset_store(struct device *dev, struct device_attribute *attr, const char *buf,
size_t count)
{
struct thermal_cooling_device *cdev = to_cooling_device(dev);
- struct cooling_dev_stats *stats = cdev->stats;
- int i, states = cdev->max_state + 1;
+ struct cooling_dev_stats *stats;
+ int i, states;
+
+ mutex_lock(&cdev->lock);
+
+ stats = cdev->stats;
+ if (!stats)
+ goto unlock;
+
+ states = cdev->max_state + 1;
spin_lock(&stats->lock);
@@ -757,6 +786,9 @@ reset_store(struct device *dev, struct device_attribute *attr, const char *buf,
spin_unlock(&stats->lock);
+unlock:
+ mutex_unlock(&cdev->lock);
+
return count;
}
@@ -764,10 +796,18 @@ static ssize_t trans_table_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct thermal_cooling_device *cdev = to_cooling_device(dev);
- struct cooling_dev_stats *stats = cdev->stats;
+ struct cooling_dev_stats *stats;
ssize_t len = 0;
int i, j;
+ mutex_lock(&cdev->lock);
+
+ stats = cdev->stats;
+ if (!stats) {
+ len = -ENODATA;
+ goto unlock;
+ }
+
len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
len += snprintf(buf + len, PAGE_SIZE - len, " : ");
for (i = 0; i <= cdev->max_state; i++) {
@@ -775,8 +815,10 @@ static ssize_t trans_table_show(struct device *dev,
break;
len += snprintf(buf + len, PAGE_SIZE - len, "state%2u ", i);
}
- if (len >= PAGE_SIZE)
- return PAGE_SIZE;
+ if (len >= PAGE_SIZE) {
+ len = PAGE_SIZE;
+ goto unlock;
+ }
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
@@ -799,8 +841,12 @@ static ssize_t trans_table_show(struct device *dev,
if (len >= PAGE_SIZE) {
pr_warn_once("Thermal transition table exceeds PAGE_SIZE. Disabling\n");
- return -EFBIG;
+ len = -EFBIG;
}
+
+unlock:
+ mutex_unlock(&cdev->lock);
+
return len;
}
@@ -830,6 +876,8 @@ static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
unsigned long states = cdev->max_state + 1;
int var;
+ lockdep_assert_held(&cdev->lock);
+
var = sizeof(*stats);
var += sizeof(*stats->time_in_state) * states;
var += sizeof(*stats->trans_table) * states * states;
@@ -855,6 +903,8 @@ out:
static void cooling_device_stats_destroy(struct thermal_cooling_device *cdev)
{
+ lockdep_assert_held(&cdev->lock);
+
kfree(cdev->stats);
cdev->stats = NULL;
}
@@ -879,6 +929,12 @@ void thermal_cooling_device_destroy_sysfs(struct thermal_cooling_device *cdev)
cooling_device_stats_destroy(cdev);
}
+void thermal_cooling_device_stats_reinit(struct thermal_cooling_device *cdev)
+{
+ cooling_device_stats_destroy(cdev);
+ cooling_device_stats_setup(cdev);
+}
+
 /* these helpers will be used only at the time of binding */
ssize_t
trip_point_show(struct device *dev, struct device_attribute *attr, char *buf)
diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
index 4339e706cc3a..f92ad71ef983 100644
--- a/drivers/thunderbolt/debugfs.c
+++ b/drivers/thunderbolt/debugfs.c
@@ -942,7 +942,8 @@ static void margining_port_remove(struct tb_port *port)
snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
- debugfs_remove_recursive(debugfs_lookup("margining", parent));
+ if (parent)
+ debugfs_remove_recursive(debugfs_lookup("margining", parent));
kfree(port->usb4->margining);
port->usb4->margining = NULL;
@@ -967,19 +968,18 @@ static void margining_switch_init(struct tb_switch *sw)
static void margining_switch_remove(struct tb_switch *sw)
{
+ struct tb_port *upstream, *downstream;
struct tb_switch *parent_sw;
- struct tb_port *downstream;
u64 route = tb_route(sw);
if (!route)
return;
- /*
- * Upstream is removed with the router itself but we need to
- * remove the downstream port margining directory.
- */
+ upstream = tb_upstream_port(sw);
parent_sw = tb_switch_parent(sw);
downstream = tb_port_at(route, parent_sw);
+
+ margining_port_remove(upstream);
margining_port_remove(downstream);
}
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 4dce2edd86ea..cfebec107f3f 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -46,7 +46,7 @@
#define QUIRK_AUTO_CLEAR_INT BIT(0)
#define QUIRK_E2E BIT(1)
-static int ring_interrupt_index(struct tb_ring *ring)
+static int ring_interrupt_index(const struct tb_ring *ring)
{
int bit = ring->hop;
if (!ring->is_tx)
@@ -63,13 +63,14 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
int reg = REG_RING_INTERRUPT_BASE +
ring_interrupt_index(ring) / 32 * 4;
- int bit = ring_interrupt_index(ring) & 31;
- int mask = 1 << bit;
+ int interrupt_bit = ring_interrupt_index(ring) & 31;
+ int mask = 1 << interrupt_bit;
u32 old, new;
if (ring->irq > 0) {
u32 step, shift, ivr, misc;
void __iomem *ivr_base;
+ int auto_clear_bit;
int index;
if (ring->is_tx)
@@ -77,18 +78,25 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
else
index = ring->hop + ring->nhi->hop_count;
- if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
- /*
- * Ask the hardware to clear interrupt status
- * bits automatically since we already know
- * which interrupt was triggered.
- */
- misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
- if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
- misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
- iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
- }
- }
+ /*
+ * Intel routers support a bit that isn't part of
+ * the USB4 spec to ask the hardware to clear
+ * interrupt status bits automatically since
+ * we already know which interrupt was triggered.
+ *
+ * Other routers explicitly disable auto-clear
+ * to prevent conditions that may occur where two
+ * MSIX interrupts are simultaneously active and
+ * reading the register clears both of them.
+ */
+ misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
+ if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
+ auto_clear_bit = REG_DMA_MISC_INT_AUTO_CLEAR;
+ else
+ auto_clear_bit = REG_DMA_MISC_DISABLE_AUTO_CLEAR;
+ if (!(misc & auto_clear_bit))
+ iowrite32(misc | auto_clear_bit,
+ ring->nhi->iobase + REG_DMA_MISC);
ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
@@ -108,7 +116,7 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
dev_dbg(&ring->nhi->pdev->dev,
"%s interrupt at register %#x bit %d (%#x -> %#x)\n",
- active ? "enabling" : "disabling", reg, bit, old, new);
+ active ? "enabling" : "disabling", reg, interrupt_bit, old, new);
if (new == old)
dev_WARN(&ring->nhi->pdev->dev,
@@ -393,14 +401,17 @@ EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
static void ring_clear_msix(const struct tb_ring *ring)
{
+ int bit;
+
if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
return;
+ bit = ring_interrupt_index(ring) & 31;
if (ring->is_tx)
- ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE);
+ iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR);
else
- ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE +
- 4 * (ring->nhi->hop_count / 32));
+ iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR +
+ 4 * (ring->nhi->hop_count / 32));
}
static irqreturn_t ring_msix(int irq, void *data)
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
index 0d4970dcef84..faef165a919c 100644
--- a/drivers/thunderbolt/nhi_regs.h
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -77,12 +77,13 @@ struct ring_desc {
/*
* three bitfields: tx, rx, rx overflow
- * Every bitfield contains one bit for every hop (REG_HOP_COUNT). Registers are
- * cleared on read. New interrupts are fired only after ALL registers have been
+ * Every bitfield contains one bit for every hop (REG_HOP_COUNT).
+ * New interrupts are fired only after ALL registers have been
* read (even those containing only disabled rings).
*/
#define REG_RING_NOTIFY_BASE 0x37800
#define RING_NOTIFY_REG_COUNT(nhi) ((31 + 3 * nhi->hop_count) / 32)
+#define REG_RING_INT_CLEAR 0x37808
/*
* two bitfields: rx, tx
@@ -105,6 +106,7 @@ struct ring_desc {
#define REG_DMA_MISC 0x39864
#define REG_DMA_MISC_INT_AUTO_CLEAR BIT(2)
+#define REG_DMA_MISC_DISABLE_AUTO_CLEAR BIT(17)
#define REG_INMAIL_DATA 0x39900
diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
index b5f2ec79c4d6..1157b8869bcc 100644
--- a/drivers/thunderbolt/quirks.c
+++ b/drivers/thunderbolt/quirks.c
@@ -20,6 +20,25 @@ static void quirk_dp_credit_allocation(struct tb_switch *sw)
}
}
+static void quirk_clx_disable(struct tb_switch *sw)
+{
+ sw->quirks |= QUIRK_NO_CLX;
+ tb_sw_dbg(sw, "disabling CL states\n");
+}
+
+static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_is_usb3_down(port))
+ continue;
+ port->max_bw = 16376;
+ tb_port_dbg(port, "USB3 maximum bandwidth limited to %u Mb/s\n",
+ port->max_bw);
+ }
+}
+
struct tb_quirk {
u16 hw_vendor_id;
u16 hw_device_id;
@@ -37,6 +56,31 @@ static const struct tb_quirk tb_quirks[] = {
* DP buffers.
*/
{ 0x8087, 0x0b26, 0x0000, 0x0000, quirk_dp_credit_allocation },
+ /*
+ * Limit the maximum USB3 bandwidth for the following Intel USB4
+ * host routers due to a hardware issue.
+ */
+ { 0x8087, PCI_DEVICE_ID_INTEL_ADL_NHI0, 0x0000, 0x0000,
+ quirk_usb3_maximum_bandwidth },
+ { 0x8087, PCI_DEVICE_ID_INTEL_ADL_NHI1, 0x0000, 0x0000,
+ quirk_usb3_maximum_bandwidth },
+ { 0x8087, PCI_DEVICE_ID_INTEL_RPL_NHI0, 0x0000, 0x0000,
+ quirk_usb3_maximum_bandwidth },
+ { 0x8087, PCI_DEVICE_ID_INTEL_RPL_NHI1, 0x0000, 0x0000,
+ quirk_usb3_maximum_bandwidth },
+ { 0x8087, PCI_DEVICE_ID_INTEL_MTL_M_NHI0, 0x0000, 0x0000,
+ quirk_usb3_maximum_bandwidth },
+ { 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI0, 0x0000, 0x0000,
+ quirk_usb3_maximum_bandwidth },
+ { 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI1, 0x0000, 0x0000,
+ quirk_usb3_maximum_bandwidth },
+ /*
+ * CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
+ */
+ { 0x0438, 0x0208, 0x0000, 0x0000, quirk_clx_disable },
+ { 0x0438, 0x0209, 0x0000, 0x0000, quirk_clx_disable },
+ { 0x0438, 0x020a, 0x0000, 0x0000, quirk_clx_disable },
+ { 0x0438, 0x020b, 0x0000, 0x0000, quirk_clx_disable },
};
/**
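The table above keys each workaround on hardware and firmware vendor/device IDs; a rough sketch of the kind of matching loop that applies it follows. tb_check_quirks() itself is not part of this hunk, so the callback member name and the zero-as-wildcard semantics are assumptions.

/*
 * Sketch only: walk tb_quirks[] and run every entry whose IDs match the
 * switch. "hook" is an assumed name for the callback member of struct
 * tb_quirk, and the switch ID fields are likewise assumptions.
 */
static void example_check_quirks(struct tb_switch *sw)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(tb_quirks); i++) {
		const struct tb_quirk *q = &tb_quirks[i];

		if (q->hw_vendor_id && q->hw_vendor_id != sw->config.vendor_id)
			continue;
		if (q->hw_device_id && q->hw_device_id != sw->config.device_id)
			continue;
		q->hook(sw);
	}
}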
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index 56008eb91e2e..9cc28197dbc4 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -187,6 +187,22 @@ static ssize_t nvm_authenticate_show(struct device *dev,
return ret;
}
+static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
+{
+ int i;
+
+ for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
+ usb4_port_retimer_set_inbound_sbtx(port, i);
+}
+
+static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
+{
+ int i;
+
+ for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--)
+ usb4_port_retimer_unset_inbound_sbtx(port, i);
+}
+
static ssize_t nvm_authenticate_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -213,6 +229,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
rt->auth_status = 0;
if (val) {
+ tb_retimer_set_inbound_sbtx(rt->port);
if (val == AUTHENTICATE_ONLY) {
ret = tb_retimer_nvm_authenticate(rt, true);
} else {
@@ -232,6 +249,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
}
exit_unlock:
+ tb_retimer_unset_inbound_sbtx(rt->port);
mutex_unlock(&rt->tb->lock);
exit_rpm:
pm_runtime_mark_last_busy(&rt->dev);
@@ -440,8 +458,7 @@ int tb_retimer_scan(struct tb_port *port, bool add)
* Enable sideband channel for each retimer. We can do this
* regardless whether there is device connected or not.
*/
- for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
- usb4_port_retimer_set_inbound_sbtx(port, i);
+ tb_retimer_set_inbound_sbtx(port);
/*
* Before doing anything else, read the authentication status.
@@ -464,6 +481,8 @@ int tb_retimer_scan(struct tb_port *port, bool add)
break;
}
+ tb_retimer_unset_inbound_sbtx(port);
+
if (!last_idx)
return 0;
diff --git a/drivers/thunderbolt/sb_regs.h b/drivers/thunderbolt/sb_regs.h
index 5185cf3e4d97..f37a4320f10a 100644
--- a/drivers/thunderbolt/sb_regs.h
+++ b/drivers/thunderbolt/sb_regs.h
@@ -20,6 +20,7 @@ enum usb4_sb_opcode {
USB4_SB_OPCODE_ROUTER_OFFLINE = 0x4e45534c, /* "LSEN" */
USB4_SB_OPCODE_ENUMERATE_RETIMERS = 0x4d554e45, /* "ENUM" */
USB4_SB_OPCODE_SET_INBOUND_SBTX = 0x5055534c, /* "LSUP" */
+ USB4_SB_OPCODE_UNSET_INBOUND_SBTX = 0x50555355, /* "USUP" */
USB4_SB_OPCODE_QUERY_LAST_RETIMER = 0x5453414c, /* "LAST" */
USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE = 0x53534e47, /* "GNSS" */
USB4_SB_OPCODE_NVM_SET_OFFSET = 0x53504f42, /* "BOPS" */
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 3370e18ba05f..da373ac38285 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -2968,8 +2968,6 @@ int tb_switch_add(struct tb_switch *sw)
dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
- tb_check_quirks(sw);
-
ret = tb_switch_set_uuid(sw);
if (ret) {
dev_err(&sw->dev, "failed to set UUID\n");
@@ -2988,6 +2986,8 @@ int tb_switch_add(struct tb_switch *sw)
}
}
+ tb_check_quirks(sw);
+
tb_switch_default_link_ports(sw);
ret = tb_switch_update_link_attributes(sw);
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index cbb20a277346..275ff5219a3a 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -23,6 +23,11 @@
#define NVM_MAX_SIZE SZ_512K
#define NVM_DATA_DWORDS 16
+/* Keep link controller awake during update */
+#define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0)
+/* Disable CLx if not supported */
+#define QUIRK_NO_CLX BIT(1)
+
/**
* struct tb_nvm - Structure holding NVM information
* @dev: Owner of the NVM
@@ -267,6 +272,8 @@ struct tb_bandwidth_group {
* @group: Bandwidth allocation group the adapter is assigned to. Only
* used for DP IN adapters for now.
* @group_list: The adapter is linked to the group's list of ports through this
+ * @max_bw: Maximum possible bandwidth through this adapter if set to
+ * non-zero.
*
* In USB4 terminology this structure represents an adapter (protocol or
* lane adapter).
@@ -294,6 +301,7 @@ struct tb_port {
unsigned int dma_credits;
struct tb_bandwidth_group *group;
struct list_head group_list;
+ unsigned int max_bw;
};
/**
@@ -1019,6 +1027,9 @@ static inline bool tb_switch_is_clx_enabled(const struct tb_switch *sw,
*/
static inline bool tb_switch_is_clx_supported(const struct tb_switch *sw)
{
+ if (sw->quirks & QUIRK_NO_CLX)
+ return false;
+
return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}
@@ -1234,6 +1245,7 @@ int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors);
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index);
+int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index);
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
u8 size);
int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
@@ -1291,9 +1303,6 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port);
void usb4_port_device_remove(struct usb4_port *usb4);
int usb4_port_device_resume(struct usb4_port *usb4);
-/* Keep link controller awake during update */
-#define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0)
-
void tb_check_quirks(struct tb_switch *sw);
#ifdef CONFIG_ACPI
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 1e5e9c147a31..a0996cb2893c 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -1579,6 +1579,20 @@ int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
}
/**
+ * usb4_port_retimer_unset_inbound_sbtx() - Disable sideband channel transactions
+ * @port: USB4 port
+ * @index: Retimer index
+ *
+ * Disables sideband channel transactions on SBTX. The reverse of
+ * usb4_port_retimer_set_inbound_sbtx().
+ */
+int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index)
+{
+ return usb4_port_retimer_op(port, index,
+ USB4_SB_OPCODE_UNSET_INBOUND_SBTX, 500);
+}
+
+/**
* usb4_port_retimer_read() - Read from retimer sideband registers
* @port: USB4 port
* @index: Retimer index
@@ -1868,6 +1882,15 @@ int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
usb4_port_retimer_nvm_read_block, &info);
}
+static inline unsigned int
+usb4_usb3_port_max_bandwidth(const struct tb_port *port, unsigned int bw)
+{
+ /* Take the possible bandwidth limitation into account */
+ if (port->max_bw)
+ return min(bw, port->max_bw);
+ return bw;
+}
+
/**
* usb4_usb3_port_max_link_rate() - Maximum support USB3 link rate
* @port: USB3 adapter port
@@ -1889,7 +1912,9 @@ int usb4_usb3_port_max_link_rate(struct tb_port *port)
return ret;
lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
- return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
+ ret = lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
+
+ return usb4_usb3_port_max_bandwidth(port, ret);
}
/**
@@ -1916,7 +1941,9 @@ int usb4_usb3_port_actual_link_rate(struct tb_port *port)
return 0;
lr = val & ADP_USB3_CS_4_ALR_MASK;
- return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
+ ret = lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
+
+ return usb4_usb3_port_max_bandwidth(port, ret);
}
static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
@@ -2067,18 +2094,30 @@ static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
int downstream_bw)
{
u32 val, ubw, dbw, scale;
- int ret;
+ int ret, max_bw;
- /* Read the used scale, hardware default is 0 */
- ret = tb_port_read(port, &scale, TB_CFG_PORT,
- port->cap_adap + ADP_USB3_CS_3, 1);
+ /* Figure out suitable scale */
+ scale = 0;
+ max_bw = max(upstream_bw, downstream_bw);
+ while (scale < 64) {
+ if (mbps_to_usb3_bw(max_bw, scale) < 4096)
+ break;
+ scale++;
+ }
+
+ if (WARN_ON(scale >= 64))
+ return -EINVAL;
+
+ ret = tb_port_write(port, &scale, TB_CFG_PORT,
+ port->cap_adap + ADP_USB3_CS_3, 1);
if (ret)
return ret;
- scale &= ADP_USB3_CS_3_SCALE_MASK;
ubw = mbps_to_usb3_bw(upstream_bw, scale);
dbw = mbps_to_usb3_bw(downstream_bw, scale);
+ tb_port_dbg(port, "scaled bandwidth %u/%u, scale %u\n", ubw, dbw, scale);
+
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_USB3_CS_2, 1);
if (ret)
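The new scale search above picks the smallest scale for which both scaled bandwidth values still fit in the 12-bit allocation fields (hence the < 4096 check); a standalone sketch of that search follows, with an illustrative stand-in for mbps_to_usb3_bw() that simply halves the value per scale step (the real conversion lives in usb4.c):

/* Sketch only: illustrative stand-in, not the real conversion. */
static unsigned int example_mbps_to_bw(unsigned int mbps, unsigned int scale)
{
	return mbps >> scale;
}

static int example_pick_scale(unsigned int upstream_bw,
			      unsigned int downstream_bw,
			      unsigned int *scale_out)
{
	unsigned int max_bw = upstream_bw > downstream_bw ?
			      upstream_bw : downstream_bw;
	unsigned int scale = 0;

	/* Smallest scale whose encoded value fits in a 12-bit field */
	while (scale < 64 && example_mbps_to_bw(max_bw, scale) >= 4096)
		scale++;

	if (scale >= 64)
		return -EINVAL;

	*scale_out = scale;
	return 0;
}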
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 5bddb2f5e931..98764e740c07 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -43,6 +43,7 @@ struct xencons_info {
int irq;
int vtermno;
grant_ref_t gntref;
+ spinlock_t ring_lock;
};
static LIST_HEAD(xenconsoles);
@@ -89,12 +90,15 @@ static int __write_console(struct xencons_info *xencons,
XENCONS_RING_IDX cons, prod;
struct xencons_interface *intf = xencons->intf;
int sent = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&xencons->ring_lock, flags);
cons = intf->out_cons;
prod = intf->out_prod;
mb(); /* update queue values before going on */
if ((prod - cons) > sizeof(intf->out)) {
+ spin_unlock_irqrestore(&xencons->ring_lock, flags);
pr_err_once("xencons: Illegal ring page indices");
return -EINVAL;
}
@@ -104,6 +108,7 @@ static int __write_console(struct xencons_info *xencons,
wmb(); /* write ring before updating pointer */
intf->out_prod = prod;
+ spin_unlock_irqrestore(&xencons->ring_lock, flags);
if (sent)
notify_daemon(xencons);
@@ -146,16 +151,19 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
int recv = 0;
struct xencons_info *xencons = vtermno_to_xencons(vtermno);
unsigned int eoiflag = 0;
+ unsigned long flags;
if (xencons == NULL)
return -EINVAL;
intf = xencons->intf;
+ spin_lock_irqsave(&xencons->ring_lock, flags);
cons = intf->in_cons;
prod = intf->in_prod;
mb(); /* get pointers before reading ring */
if ((prod - cons) > sizeof(intf->in)) {
+ spin_unlock_irqrestore(&xencons->ring_lock, flags);
pr_err_once("xencons: Illegal ring page indices");
return -EINVAL;
}
@@ -179,10 +187,13 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
xencons->out_cons = intf->out_cons;
xencons->out_cons_same = 0;
}
+ if (!recv && xencons->out_cons_same++ > 1) {
+ eoiflag = XEN_EOI_FLAG_SPURIOUS;
+ }
+ spin_unlock_irqrestore(&xencons->ring_lock, flags);
+
if (recv) {
notify_daemon(xencons);
- } else if (xencons->out_cons_same++ > 1) {
- eoiflag = XEN_EOI_FLAG_SPURIOUS;
}
xen_irq_lateeoi(xencons->irq, eoiflag);
@@ -239,6 +250,7 @@ static int xen_hvm_console_init(void)
info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
+ spin_lock_init(&info->ring_lock);
} else if (info->intf != NULL) {
/* already configured */
return 0;
@@ -275,6 +287,7 @@ err:
static int xencons_info_pv_init(struct xencons_info *info, int vtermno)
{
+ spin_lock_init(&info->ring_lock);
info->evtchn = xen_start_info->console.domU.evtchn;
/* GFN == MFN for PV guest */
info->intf = gfn_to_virt(xen_start_info->console.domU.mfn);
@@ -325,6 +338,7 @@ static int xen_initial_domain_console_init(void)
info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
+ spin_lock_init(&info->ring_lock);
}
info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
@@ -482,6 +496,7 @@ static int xencons_probe(struct xenbus_device *dev,
info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
+ spin_lock_init(&info->ring_lock);
dev_set_drvdata(&dev->dev, info);
info->xbdev = dev;
info->vtermno = xenbus_devid_to_vtermno(devid);
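The ring_lock added above keeps the cons/prod index sampling, the overflow check and the index update atomic with respect to other writers; a condensed sketch of the producer path it now protects, mirroring __write_console() above:

/* Sketch only: locked producer path for the shared console ring. */
static int example_write_console(struct xencons_info *xencons,
				 const char *data, int len)
{
	struct xencons_interface *intf = xencons->intf;
	XENCONS_RING_IDX cons, prod;
	unsigned long flags;
	int sent = 0;

	spin_lock_irqsave(&xencons->ring_lock, flags);
	cons = intf->out_cons;
	prod = intf->out_prod;
	mb();	/* read indices before using them */

	if ((prod - cons) > sizeof(intf->out)) {
		/* corrupted ring: bail out without touching it */
		spin_unlock_irqrestore(&xencons->ring_lock, flags);
		return -EINVAL;
	}

	while (sent < len && (prod - cons) < sizeof(intf->out))
		intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];

	wmb();	/* publish the data before the producer index */
	intf->out_prod = prod;
	spin_unlock_irqrestore(&xencons->ring_lock, flags);

	return sent;
}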
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index aa80de3a8194..678014253b7b 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -534,7 +534,7 @@ static int of_serdev_register_devices(struct serdev_controller *ctrl)
if (!serdev)
continue;
- serdev->dev.of_node = node;
+ device_set_node(&serdev->dev, of_fwnode_handle(node));
err = serdev_device_add(serdev);
if (err) {
diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c
index f8e99995eee9..d94c3811a8f7 100644
--- a/drivers/tty/serial/8250/8250_em.c
+++ b/drivers/tty/serial/8250/8250_em.c
@@ -106,8 +106,8 @@ static int serial8250_em_probe(struct platform_device *pdev)
memset(&up, 0, sizeof(up));
up.port.mapbase = regs->start;
up.port.irq = irq;
- up.port.type = PORT_UNKNOWN;
- up.port.flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | UPF_IOREMAP;
+ up.port.type = PORT_16750;
+ up.port.flags = UPF_FIXED_PORT | UPF_IOREMAP | UPF_FIXED_TYPE;
up.port.dev = &pdev->dev;
up.port.private_data = priv;
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
index 8aad15622a2e..8adfaa183f77 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -34,7 +34,7 @@ int fsl8250_handle_irq(struct uart_port *port)
iir = port->serial_in(port, UART_IIR);
if (iir & UART_IIR_NO_INT) {
- spin_unlock(&up->port.lock);
+ spin_unlock_irqrestore(&up->port.lock, flags);
return 0;
}
@@ -42,7 +42,7 @@ int fsl8250_handle_irq(struct uart_port *port)
if (unlikely(up->lsr_saved_flags & UART_LSR_BI)) {
up->lsr_saved_flags &= ~UART_LSR_BI;
port->serial_in(port, UART_RX);
- spin_unlock(&up->port.lock);
+ spin_unlock_irqrestore(&up->port.lock, flags);
return 1;
}
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 978dc196c29b..5313aa31930f 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -257,8 +257,9 @@ config SERIAL_8250_ASPEED_VUART
tristate "Aspeed Virtual UART"
depends on SERIAL_8250
depends on OF
- depends on REGMAP && MFD_SYSCON
+ depends on MFD_SYSCON
depends on ARCH_ASPEED || COMPILE_TEST
+ select REGMAP
help
If you want to use the virtual UART (VUART) device on Aspeed
BMC platforms, enable this option. This enables the 16550A-
@@ -299,7 +300,6 @@ config SERIAL_8250_PCI1XXXX
tristate "Microchip 8250 based serial port"
depends on SERIAL_8250 && PCI
select SERIAL_8250_PCILIB
- default SERIAL_8250
help
Select this option if you have a setup with Microchip PCIe
Switch with serial port enabled and wish to enable 8250
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 625358f44419..0072892ca7fc 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1313,7 +1313,7 @@ config SERIAL_FSL_LPUART
config SERIAL_FSL_LPUART_CONSOLE
bool "Console on Freescale lpuart serial port"
- depends on SERIAL_FSL_LPUART
+ depends on SERIAL_FSL_LPUART=y
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
help
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index e945f41b93d4..56e6ba3250cd 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1354,6 +1354,7 @@ static void lpuart_dma_rx_free(struct uart_port *port)
struct dma_chan *chan = sport->dma_rx_chan;
dmaengine_terminate_sync(chan);
+ del_timer_sync(&sport->lpuart_timer);
dma_unmap_sg(chan->device->dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
kfree(sport->rx_ring.buf);
sport->rx_ring.tail = 0;
@@ -1813,7 +1814,6 @@ static int lpuart32_startup(struct uart_port *port)
static void lpuart_dma_shutdown(struct lpuart_port *sport)
{
if (sport->lpuart_dma_rx_use) {
- del_timer_sync(&sport->lpuart_timer);
lpuart_dma_rx_free(&sport->port);
sport->lpuart_dma_rx_use = false;
}
@@ -1973,10 +1973,8 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
* Since timer function acqures sport->port.lock, need to stop before
* acquring same lock because otherwise del_timer_sync() can deadlock.
*/
- if (old && sport->lpuart_dma_rx_use) {
- del_timer_sync(&sport->lpuart_timer);
+ if (old && sport->lpuart_dma_rx_use)
lpuart_dma_rx_free(&sport->port);
- }
spin_lock_irqsave(&sport->port.lock, flags);
@@ -2210,10 +2208,8 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
* Since timer function acqures sport->port.lock, need to stop before
* acquring same lock because otherwise del_timer_sync() can deadlock.
*/
- if (old && sport->lpuart_dma_rx_use) {
- del_timer_sync(&sport->lpuart_timer);
+ if (old && sport->lpuart_dma_rx_use)
lpuart_dma_rx_free(&sport->port);
- }
spin_lock_irqsave(&sport->port.lock, flags);
@@ -2240,9 +2236,15 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
/* update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, baud);
- /* wait transmit engin complete */
- lpuart32_write(&sport->port, 0, UARTMODIR);
- lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
+ /*
+ * LPUART Transmission Complete Flag may never be set while queuing a break
+ * character, so skip waiting for transmission complete when UARTCTRL_SBK is
+ * asserted.
+ */
+ if (!(old_ctrl & UARTCTRL_SBK)) {
+ lpuart32_write(&sport->port, 0, UARTMODIR);
+ lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
+ }
/* disable transmit and receive */
lpuart32_write(&sport->port, old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE),
@@ -3014,7 +3016,6 @@ static int lpuart_suspend(struct device *dev)
* cannot resume as expected, hence gracefully release the
* Rx DMA path before suspend and start Rx DMA path on resume.
*/
- del_timer_sync(&sport->lpuart_timer);
lpuart_dma_rx_free(&sport->port);
/* Disable Rx DMA to use UART port as wakeup source */
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index d69592e5e2ec..28fbc927a546 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -596,7 +596,7 @@ static void qcom_geni_serial_stop_tx_dma(struct uart_port *uport)
if (!qcom_geni_serial_main_active(uport))
return;
- if (port->rx_dma_addr) {
+ if (port->tx_dma_addr) {
geni_se_tx_dma_unprep(&port->se, port->tx_dma_addr,
port->tx_remaining);
port->tx_dma_addr = 0;
@@ -631,9 +631,8 @@ static void qcom_geni_serial_start_tx_dma(struct uart_port *uport)
if (port->tx_dma_addr)
return;
- xmit_size = uart_circ_chars_pending(xmit);
- if (xmit_size < WAKEUP_CHARS)
- uart_write_wakeup(uport);
+ if (uart_circ_empty(xmit))
+ return;
xmit_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
@@ -1070,6 +1069,10 @@ static int setup_fifos(struct qcom_geni_serial_port *port)
static void qcom_geni_serial_shutdown(struct uart_port *uport)
{
disable_irq(uport->irq);
+
+ if (uart_console(uport))
+ return;
+
qcom_geni_serial_stop_tx(uport);
qcom_geni_serial_stop_rx(uport);
}
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 29c94be09159..abad091baeea 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1666,9 +1666,9 @@ MODULE_ALIAS("spi:sc16is7xx");
#endif
#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
-static int sc16is7xx_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int sc16is7xx_i2c_probe(struct i2c_client *i2c)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(i2c);
const struct sc16is7xx_devtype *devtype;
struct regmap *regmap;
@@ -1709,7 +1709,7 @@ static struct i2c_driver sc16is7xx_i2c_uart_driver = {
.name = SC16IS7XX_NAME,
.of_match_table = sc16is7xx_dt_ids,
},
- .probe = sc16is7xx_i2c_probe,
+ .probe_new = sc16is7xx_i2c_probe,
.remove = sc16is7xx_i2c_remove,
.id_table = sc16is7xx_i2c_id_table,
};
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 57a5c23b51d4..3c2ea9c098f7 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -4545,6 +4545,9 @@ static int con_font_get(struct vc_data *vc, struct console_font_op *op)
int c;
unsigned int vpitch = op->op == KD_FONT_OP_GET_TALL ? op->height : 32;
+ if (vpitch > max_font_height)
+ return -EINVAL;
+
if (op->data) {
font.data = kvmalloc(max_font_size, GFP_KERNEL);
if (!font.data)
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 172d25fef740..37e178a9ac47 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1500,7 +1500,7 @@ start_window:
scaling->window_start_t = curr_t;
scaling->tot_busy_t = 0;
- if (hba->outstanding_reqs) {
+ if (scaling->active_reqs) {
scaling->busy_start_t = curr_t;
scaling->is_busy_started = true;
} else {
@@ -2118,7 +2118,7 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_scaling.active_reqs--;
- if (!hba->outstanding_reqs && scaling->is_busy_started) {
+ if (!scaling->active_reqs && scaling->is_busy_started) {
scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
scaling->busy_start_t));
scaling->busy_start_t = 0;
@@ -10512,4 +10512,5 @@ module_exit(ufshcd_core_exit);
MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
+MODULE_SOFTDEP("pre: governor_simpleondemand");
MODULE_LICENSE("GPL");
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 34fc453f3eb1..a02cd866e2f8 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -1177,7 +1177,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
err = ufs_qcom_clk_scale_down_post_change(hba);
- if (err || !dev_req_params) {
+ if (err) {
ufshcd_uic_hibern8_exit(hba);
return err;
}
@@ -1451,8 +1451,8 @@ static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
if (IS_ERR(res->base)) {
dev_err(hba->dev, "Failed to map res %s, err=%d\n",
res->name, (int)PTR_ERR(res->base));
- res->base = NULL;
ret = PTR_ERR(res->base);
+ res->base = NULL;
return ret;
}
}
@@ -1466,7 +1466,7 @@ static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
/* Explicitly allocate MCQ resource from ufs_mem */
res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL);
if (!res_mcq)
- return ret;
+ return -ENOMEM;
res_mcq->start = res_mem->start +
MCQ_SQATTR_OFFSET(hba->mcq_capabilities);
@@ -1478,7 +1478,7 @@ static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
if (ret) {
dev_err(hba->dev, "Failed to insert MCQ resource, err=%d\n",
ret);
- goto insert_res_err;
+ return ret;
}
res->base = devm_ioremap_resource(hba->dev, res_mcq);
@@ -1495,8 +1495,6 @@ out:
ioremap_err:
res->base = NULL;
remove_resource(res_mcq);
-insert_res_err:
- devm_kfree(hba->dev, res_mcq);
return ret;
}
diff --git a/drivers/usb/cdns3/cdns3-pci-wrap.c b/drivers/usb/cdns3/cdns3-pci-wrap.c
index deeea618ba33..1f6320d98a76 100644
--- a/drivers/usb/cdns3/cdns3-pci-wrap.c
+++ b/drivers/usb/cdns3/cdns3-pci-wrap.c
@@ -60,6 +60,11 @@ static struct pci_dev *cdns3_get_second_fun(struct pci_dev *pdev)
return NULL;
}
+ if (func->devfn != PCI_DEV_FN_HOST_DEVICE &&
+ func->devfn != PCI_DEV_FN_OTG) {
+ return NULL;
+ }
+
return func;
}
diff --git a/drivers/usb/cdns3/cdnsp-ep0.c b/drivers/usb/cdns3/cdnsp-ep0.c
index 9b8325f82499..d63d5d92f255 100644
--- a/drivers/usb/cdns3/cdnsp-ep0.c
+++ b/drivers/usb/cdns3/cdnsp-ep0.c
@@ -403,20 +403,6 @@ static int cdnsp_ep0_std_request(struct cdnsp_device *pdev,
case USB_REQ_SET_ISOCH_DELAY:
ret = cdnsp_ep0_set_isoch_delay(pdev, ctrl);
break;
- case USB_REQ_SET_INTERFACE:
- /*
- * Add request into pending list to block sending status stage
- * by libcomposite.
- */
- list_add_tail(&pdev->ep0_preq.list,
- &pdev->ep0_preq.pep->pending_list);
-
- ret = cdnsp_ep0_delegate_req(pdev, ctrl);
- if (ret == -EBUSY)
- ret = 0;
-
- list_del(&pdev->ep0_preq.list);
- break;
default:
ret = cdnsp_ep0_delegate_req(pdev, ctrl);
break;
@@ -474,9 +460,6 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev)
else
ret = cdnsp_ep0_delegate_req(pdev, ctrl);
- if (!len)
- pdev->ep0_stage = CDNSP_STATUS_STAGE;
-
if (ret == USB_GADGET_DELAYED_STATUS) {
trace_cdnsp_ep0_status_stage("delayed");
return;
@@ -484,6 +467,6 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev)
out:
if (ret < 0)
cdnsp_ep0_stall(pdev);
- else if (pdev->ep0_stage == CDNSP_STATUS_STAGE)
+ else if (!len && pdev->ep0_stage != CDNSP_STATUS_STAGE)
cdnsp_status_stage(pdev);
}
diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c
index efd54ed918b9..7b151f5af3cc 100644
--- a/drivers/usb/cdns3/cdnsp-pci.c
+++ b/drivers/usb/cdns3/cdnsp-pci.c
@@ -29,30 +29,23 @@
#define PLAT_DRIVER_NAME "cdns-usbssp"
#define CDNS_VENDOR_ID 0x17cd
-#define CDNS_DEVICE_ID 0x0100
+#define CDNS_DEVICE_ID 0x0200
+#define CDNS_DRD_ID 0x0100
#define CDNS_DRD_IF (PCI_CLASS_SERIAL_USB << 8 | 0x80)
static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
{
- struct pci_dev *func;
-
/*
* Gets the second function.
- * It's little tricky, but this platform has two function.
- * The fist keeps resources for Host/Device while the second
- * keeps resources for DRD/OTG.
+	 * Platform has two functions. The first keeps resources for
+	 * Host/Device while the second keeps resources for DRD/OTG.
*/
- func = pci_get_device(pdev->vendor, pdev->device, NULL);
- if (!func)
- return NULL;
+ if (pdev->device == CDNS_DEVICE_ID)
+ return pci_get_device(pdev->vendor, CDNS_DRD_ID, NULL);
+ else if (pdev->device == CDNS_DRD_ID)
+ return pci_get_device(pdev->vendor, CDNS_DEVICE_ID, NULL);
- if (func->devfn == pdev->devfn) {
- func = pci_get_device(pdev->vendor, pdev->device, func);
- if (!func)
- return NULL;
- }
-
- return func;
+ return NULL;
}
static int cdnsp_pci_probe(struct pci_dev *pdev,
@@ -230,6 +223,8 @@ static const struct pci_device_id cdnsp_pci_ids[] = {
PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID },
{ PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
CDNS_DRD_IF, PCI_ANY_ID },
+ { PCI_VENDOR_ID_CDNS, CDNS_DRD_ID, PCI_ANY_ID, PCI_ANY_ID,
+ CDNS_DRD_IF, PCI_ANY_ID },
{ 0, }
};
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 005c67cb3afb..f210b7489fd5 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -208,6 +208,7 @@ struct hw_bank {
* @in_lpm: if the core in low power mode
* @wakeup_int: if wakeup interrupt occur
* @rev: The revision number for controller
+ * @mutex: protect code from concurrent running when doing role switch
*/
struct ci_hdrc {
struct device *dev;
@@ -260,6 +261,7 @@ struct ci_hdrc {
bool in_lpm;
bool wakeup_int;
enum ci_revision rev;
+ struct mutex mutex;
};
static inline struct ci_role_driver *ci_role(struct ci_hdrc *ci)
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 27c601296130..281fc51720ce 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -984,9 +984,16 @@ static ssize_t role_store(struct device *dev,
strlen(ci->roles[role]->name)))
break;
- if (role == CI_ROLE_END || role == ci->role)
+ if (role == CI_ROLE_END)
return -EINVAL;
+ mutex_lock(&ci->mutex);
+
+ if (role == ci->role) {
+ mutex_unlock(&ci->mutex);
+ return n;
+ }
+
pm_runtime_get_sync(dev);
disable_irq(ci->irq);
ci_role_stop(ci);
@@ -995,6 +1002,7 @@ static ssize_t role_store(struct device *dev,
ci_handle_vbus_change(ci);
enable_irq(ci->irq);
pm_runtime_put_sync(dev);
+ mutex_unlock(&ci->mutex);
return (ret == 0) ? n : ret;
}
@@ -1030,6 +1038,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&ci->lock);
+ mutex_init(&ci->mutex);
ci->dev = dev;
ci->platdata = dev_get_platdata(dev);
ci->imx28_write_fix = !!(ci->platdata->flags &
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index 622c3b68aa1e..f5490f2a5b6b 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -167,8 +167,10 @@ static int hw_wait_vbus_lower_bsv(struct ci_hdrc *ci)
void ci_handle_id_switch(struct ci_hdrc *ci)
{
- enum ci_role role = ci_otg_role(ci);
+ enum ci_role role;
+ mutex_lock(&ci->mutex);
+ role = ci_otg_role(ci);
if (role != ci->role) {
dev_dbg(ci->dev, "switching from %s to %s\n",
ci_role(ci)->name, ci->roles[role]->name);
@@ -198,6 +200,7 @@ void ci_handle_id_switch(struct ci_hdrc *ci)
if (role == CI_ROLE_GADGET)
ci_handle_vbus_change(ci);
}
+ mutex_unlock(&ci->mutex);
}
/**
* ci_otg_work - perform otg (vbus/id) event handle
diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c
index d8d6493bc457..a8605b02115b 100644
--- a/drivers/usb/dwc2/drd.c
+++ b/drivers/usb/dwc2/drd.c
@@ -35,7 +35,8 @@ static void dwc2_ovr_init(struct dwc2_hsotg *hsotg)
spin_unlock_irqrestore(&hsotg->lock, flags);
- dwc2_force_mode(hsotg, (hsotg->dr_mode == USB_DR_MODE_HOST));
+ dwc2_force_mode(hsotg, (hsotg->dr_mode == USB_DR_MODE_HOST) ||
+ (hsotg->role_sw_default_mode == USB_DR_MODE_HOST));
}
static int dwc2_ovr_avalid(struct dwc2_hsotg *hsotg, bool valid)
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 62fa6378d2d7..8b15742d9e8a 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -4549,8 +4549,7 @@ static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
hsotg->gadget.dev.of_node = hsotg->dev->of_node;
hsotg->gadget.speed = USB_SPEED_UNKNOWN;
- if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL ||
- (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg))) {
+ if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
ret = dwc2_lowlevel_hw_enable(hsotg);
if (ret)
goto err;
@@ -4612,8 +4611,7 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
if (!IS_ERR_OR_NULL(hsotg->uphy))
otg_set_peripheral(hsotg->uphy->otg, NULL);
- if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL ||
- (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg)))
+ if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
dwc2_lowlevel_hw_disable(hsotg);
return 0;
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 23ef75996823..d1589ba7d322 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -91,13 +91,6 @@ static int dwc2_get_dr_mode(struct dwc2_hsotg *hsotg)
return 0;
}
-static void __dwc2_disable_regulators(void *data)
-{
- struct dwc2_hsotg *hsotg = data;
-
- regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
-}
-
static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
{
struct platform_device *pdev = to_platform_device(hsotg->dev);
@@ -108,11 +101,6 @@ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
if (ret)
return ret;
- ret = devm_add_action_or_reset(&pdev->dev,
- __dwc2_disable_regulators, hsotg);
- if (ret)
- return ret;
-
if (hsotg->clk) {
ret = clk_prepare_enable(hsotg->clk);
if (ret)
@@ -168,7 +156,7 @@ static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
if (hsotg->clk)
clk_disable_unprepare(hsotg->clk);
- return 0;
+ return regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
}
/**
@@ -576,8 +564,7 @@ static int dwc2_driver_probe(struct platform_device *dev)
dwc2_debugfs_init(hsotg);
/* Gadget code manages lowlevel hw on its own */
- if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL ||
- (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg)))
+ if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
dwc2_lowlevel_hw_disable(hsotg);
#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
@@ -608,7 +595,7 @@ error_init:
if (hsotg->params.activate_stm_id_vb_detection)
regulator_disable(hsotg->usb33d);
error:
- if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL)
+ if (hsotg->ll_hw_enabled)
dwc2_lowlevel_hw_disable(hsotg);
return retval;
}
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 582ebd9cf9c2..4743e918dcaf 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1098,7 +1098,7 @@ struct dwc3_scratchpad_array {
* change quirk.
* @dis_tx_ipgap_linecheck_quirk: set if we disable u2mac linestate
* check during HS transmit.
- * @resume-hs-terminations: Set if we enable quirk for fixing improper crc
+ * @resume_hs_terminations: Set if we enable quirk for fixing improper crc
* generation after resume from suspend.
* @parkmode_disable_ss_quirk: set if we need to disable all SuperSpeed
* instances in park mode.
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 3c63fa97a680..cf5b4f49c3ed 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1699,6 +1699,7 @@ static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
*/
static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt)
{
+ struct dwc3 *dwc = dep->dwc;
struct dwc3_gadget_ep_cmd_params params;
u32 cmd;
int ret;
@@ -1722,10 +1723,13 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int
WARN_ON_ONCE(ret);
dep->resource_index = 0;
- if (!interrupt)
+ if (!interrupt) {
+ if (!DWC3_IP_IS(DWC3) || DWC3_VER_IS_PRIOR(DWC3, 310A))
+ mdelay(1);
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
- else if (!ret)
+ } else if (!ret) {
dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
+ }
dep->flags &= ~DWC3_EP_DELAY_STOP;
return ret;
@@ -3774,7 +3778,11 @@ void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
* enabled, the EndTransfer command will have completed upon
* returning from this function.
*
- * This mode is NOT available on the DWC_usb31 IP.
+ * This mode is NOT available on the DWC_usb31 IP. In this
+ * case, if the IOC bit is not set, then delay by 1ms
+	 * after issuing the EndTransfer command. This allows the
+	 * controller to handle the command completely before the DWC3
+	 * remove-requests path attempts to unmap USB request buffers.
*/
__dwc3_stop_active_transfer(dep, force, interrupt);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index fa7dd6cf014d..5377d873c08e 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2079,10 +2079,9 @@ unknown:
sizeof(url_descriptor->URL)
- WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_offset);
- if (ctrl->wLength < WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH
- + landing_page_length)
- landing_page_length = ctrl->wLength
- - WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_offset;
+ if (w_length < WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_length)
+ landing_page_length = w_length
+ - WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_offset;
memcpy(url_descriptor->URL,
cdev->landing_page + landing_page_offset,
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index c1f62e91b012..4a42574b4a7f 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -1422,7 +1422,7 @@ void g_audio_cleanup(struct g_audio *g_audio)
uac = g_audio->uac;
card = uac->card;
if (card)
- snd_card_free(card);
+ snd_card_free_when_closed(card);
kfree(uac->p_prm.reqs);
kfree(uac->c_prm.reqs);
diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
index 5402e4b7267b..12fc6eb67c3b 100644
--- a/drivers/usb/misc/onboard_usb_hub.c
+++ b/drivers/usb/misc/onboard_usb_hub.c
@@ -410,6 +410,7 @@ static const struct usb_device_id onboard_hub_id_table[] = {
{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 */
{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 */
{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */
+ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 */
diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h
index 0a943a154649..aca5f50eb0da 100644
--- a/drivers/usb/misc/onboard_usb_hub.h
+++ b/drivers/usb/misc/onboard_usb_hub.h
@@ -36,6 +36,7 @@ static const struct onboard_hub_pdata vialab_vl817_data = {
static const struct of_device_id onboard_hub_match[] = {
{ .compatible = "usb424,2514", .data = &microchip_usb424_data, },
+ { .compatible = "usb424,2517", .data = &microchip_usb424_data, },
{ .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
{ .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
{ .compatible = "usb5e3,608", .data = &genesys_gl850g_data, },
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index c7b763d6d102..1f8c9b16a0fb 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -111,6 +111,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BROKEN_FUA),
+/* Reported by: Yaroslav Furman <yaro330@gmail.com> */
+UNUSUAL_DEV(0x152d, 0x0583, 0x0000, 0x9999,
+ "JMicron",
+ "JMS583Gen 2",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_OPCODES),
+
/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999,
"PNY",
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index a0d943d78580..1ee774c263f0 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -1445,10 +1445,18 @@ static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
const u32 *data, int cnt)
{
+ u32 vdo_hdr = port->vdo_data[0];
+
WARN_ON(!mutex_is_locked(&port->lock));
- /* Make sure we are not still processing a previous VDM packet */
- WARN_ON(port->vdm_state > VDM_STATE_DONE);
+	/* If we are sending discover_identity, handle the received message first */
+ if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMD(vdo_hdr) == CMD_DISCOVER_IDENT) {
+ port->send_discover = true;
+ mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
+ } else {
+ /* Make sure we are not still processing a previous VDM packet */
+ WARN_ON(port->vdm_state > VDM_STATE_DONE);
+ }
port->vdo_count = cnt + 1;
port->vdo_data[0] = header;
@@ -1948,11 +1956,13 @@ static void vdm_run_state_machine(struct tcpm_port *port)
switch (PD_VDO_CMD(vdo_hdr)) {
case CMD_DISCOVER_IDENT:
res = tcpm_ams_start(port, DISCOVER_IDENTITY);
- if (res == 0)
+ if (res == 0) {
port->send_discover = false;
- else if (res == -EAGAIN)
+ } else if (res == -EAGAIN) {
+ port->vdo_data[0] = 0;
mod_send_discover_delayed_work(port,
SEND_DISCOVER_RETRY_MS);
+ }
break;
case CMD_DISCOVER_SVID:
res = tcpm_ams_start(port, DISCOVER_SVIDS);
@@ -2035,6 +2045,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
unsigned long timeout;
port->vdm_retries = 0;
+ port->vdo_data[0] = 0;
port->vdm_state = VDM_STATE_BUSY;
timeout = vdm_ready_timeout(vdo_hdr);
mod_vdm_delayed_work(port, timeout);
@@ -4570,6 +4581,9 @@ static void run_state_machine(struct tcpm_port *port)
case SOFT_RESET:
port->message_id = 0;
port->rx_msgid = -1;
+ /* remove existing capabilities */
+ usb_power_delivery_unregister_capabilities(port->partner_source_caps);
+ port->partner_source_caps = NULL;
tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
tcpm_ams_finish(port);
if (port->pwr_role == TYPEC_SOURCE) {
@@ -4589,6 +4603,9 @@ static void run_state_machine(struct tcpm_port *port)
case SOFT_RESET_SEND:
port->message_id = 0;
port->rx_msgid = -1;
+ /* remove existing capabilities */
+ usb_power_delivery_unregister_capabilities(port->partner_source_caps);
+ port->partner_source_caps = NULL;
if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
tcpm_set_state_cond(port, hard_reset_state(port), 0);
else
@@ -4718,6 +4735,9 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_state(port, SNK_STARTUP, 0);
break;
case PR_SWAP_SNK_SRC_SINK_OFF:
+ /* will be source, remove existing capabilities */
+ usb_power_delivery_unregister_capabilities(port->partner_source_caps);
+ port->partner_source_caps = NULL;
/*
* Prevent vbus discharge circuit from turning on during PR_SWAP
* as this is not a disconnect.
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index f632350f6dcb..8d1baf28df55 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -1125,12 +1125,11 @@ static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con)
return NULL;
}
-static int ucsi_register_port(struct ucsi *ucsi, int index)
+static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
{
struct usb_power_delivery_desc desc = { ucsi->cap.pd_version};
struct usb_power_delivery_capabilities_desc pd_caps;
struct usb_power_delivery_capabilities *pd_cap;
- struct ucsi_connector *con = &ucsi->connector[index];
struct typec_capability *cap = &con->typec_cap;
enum typec_accessory *accessory = cap->accessory;
enum usb_role u_role = USB_ROLE_NONE;
@@ -1151,7 +1150,6 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
init_completion(&con->complete);
mutex_init(&con->lock);
INIT_LIST_HEAD(&con->partner_tasks);
- con->num = index + 1;
con->ucsi = ucsi;
cap->fwnode = ucsi_find_fwnode(con);
@@ -1328,8 +1326,8 @@ out_unlock:
*/
static int ucsi_init(struct ucsi *ucsi)
{
- struct ucsi_connector *con;
- u64 command;
+ struct ucsi_connector *con, *connector;
+ u64 command, ntfy;
int ret;
int i;
@@ -1341,8 +1339,8 @@ static int ucsi_init(struct ucsi *ucsi)
}
/* Enable basic notifications */
- ucsi->ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
- command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
+ ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
+ command = UCSI_SET_NOTIFICATION_ENABLE | ntfy;
ret = ucsi_send_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_reset;
@@ -1359,31 +1357,33 @@ static int ucsi_init(struct ucsi *ucsi)
}
/* Allocate the connectors. Released in ucsi_unregister() */
- ucsi->connector = kcalloc(ucsi->cap.num_connectors + 1,
- sizeof(*ucsi->connector), GFP_KERNEL);
- if (!ucsi->connector) {
+ connector = kcalloc(ucsi->cap.num_connectors + 1, sizeof(*connector), GFP_KERNEL);
+ if (!connector) {
ret = -ENOMEM;
goto err_reset;
}
/* Register all connectors */
for (i = 0; i < ucsi->cap.num_connectors; i++) {
- ret = ucsi_register_port(ucsi, i);
+ connector[i].num = i + 1;
+ ret = ucsi_register_port(ucsi, &connector[i]);
if (ret)
goto err_unregister;
}
/* Enable all notifications */
- ucsi->ntfy = UCSI_ENABLE_NTFY_ALL;
- command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
+ ntfy = UCSI_ENABLE_NTFY_ALL;
+ command = UCSI_SET_NOTIFICATION_ENABLE | ntfy;
ret = ucsi_send_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_unregister;
+ ucsi->connector = connector;
+ ucsi->ntfy = ntfy;
return 0;
err_unregister:
- for (con = ucsi->connector; con->port; con++) {
+ for (con = connector; con->port; con++) {
ucsi_unregister_partner(con);
ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
ucsi_unregister_port_psy(con);
@@ -1399,10 +1399,7 @@ err_unregister:
typec_unregister_port(con->port);
con->port = NULL;
}
-
- kfree(ucsi->connector);
- ucsi->connector = NULL;
-
+ kfree(connector);
err_reset:
memset(&ucsi->cap, 0, sizeof(ucsi->cap));
ucsi_reset_ppm(ucsi);
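The rework above builds the connector array in a local pointer and only publishes it (together with the final notification mask) once every port has registered, so notification handlers never observe a half-built ucsi->connector; a small sketch of that publish-after-init shape, with error unwinding abbreviated compared to the real ucsi_init():

/* Sketch only: build fully into a local, publish on success. */
static int example_publish_connectors(struct ucsi *ucsi, unsigned int count)
{
	struct ucsi_connector *connector;
	unsigned int i;
	int ret;

	connector = kcalloc(count + 1, sizeof(*connector), GFP_KERNEL);
	if (!connector)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		connector[i].num = i + 1;
		ret = ucsi_register_port(ucsi, &connector[i]);
		if (ret)
			goto err_free;	/* ucsi->connector was never set */
	}

	ucsi->connector = connector;	/* publish only when fully initialized */
	return 0;

err_free:
	/* the real error path also unregisters partners, alt modes, psy, etc. */
	while (i--)
		typec_unregister_port(connector[i].port);
	kfree(connector);
	return ret;
}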
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index ce0c8ef80c04..62206a6b8ea7 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -78,7 +78,7 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
if (ret)
goto out_clear_bit;
- if (!wait_for_completion_timeout(&ua->complete, HZ))
+ if (!wait_for_completion_timeout(&ua->complete, 5 * HZ))
ret = -ETIMEDOUT;
out_clear_bit:
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 058fbe28107e..25fc4120b618 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -96,6 +96,7 @@ struct mlx5_vdpa_dev {
struct mlx5_control_vq cvq;
struct workqueue_struct *wq;
unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
+ bool suspended;
};
int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 3a0e721aef05..520646ae7fa0 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -2438,7 +2438,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
if (err)
goto err_mr;
- if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
+ if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended)
goto err_mr;
restore_channels_info(ndev);
@@ -2606,6 +2606,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
clear_vqs_ready(ndev);
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status = 0;
+ ndev->mvdev.suspended = false;
ndev->cur_num_vqs = 0;
ndev->mvdev.cvq.received_desc = 0;
ndev->mvdev.cvq.completed_desc = 0;
@@ -2852,6 +2853,8 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
struct mlx5_vdpa_virtqueue *mvq;
int i;
+ mlx5_vdpa_info(mvdev, "suspending device\n");
+
down_write(&ndev->reslock);
ndev->nb_registered = false;
mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
@@ -2861,6 +2864,7 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
suspend_vq(ndev, mvq);
}
mlx5_vdpa_cvq_suspend(mvdev);
+ mvdev->suspended = true;
up_write(&ndev->reslock);
return 0;
}
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 6a0a65814626..eea23c630f7c 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -68,6 +68,17 @@ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
(uintptr_t)vq->device_addr);
vq->vring.last_avail_idx = last_avail_idx;
+
+ /*
+	 * Since vdpa_sim does not support receiving inflight descriptors as
+	 * the destination of a migration, let's set both avail_idx and used_idx
+ * the same at vq start. This is how vhost-user works in a
+ * VHOST_SET_VRING_BASE call.
+ *
+ * Although the simple fix is to set last_used_idx at
+ * vdpasim_set_vq_state, it would be reset at vdpasim_queue_ready.
+ */
+ vq->vring.last_used_idx = last_avail_idx;
vq->vring.notify = vdpasim_vq_notify;
}
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index 8fe267ca3e76..281287fae89f 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -645,8 +645,8 @@ static void vp_vdpa_remove(struct pci_dev *pdev)
struct virtio_pci_modern_device *mdev = NULL;
mdev = vp_vdpa_mgtdev->mdev;
- vp_modern_remove(mdev);
vdpa_mgmtdev_unregister(&vp_vdpa_mgtdev->mgtdev);
+ vp_modern_remove(mdev);
kfree(vp_vdpa_mgtdev->mgtdev.id_table);
kfree(mdev);
kfree(vp_vdpa_mgtdev);
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
index e897537a9e8a..d95fd382814c 100644
--- a/drivers/vfio/pci/mlx5/main.c
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -442,16 +442,10 @@ static long mlx5vf_precopy_ioctl(struct file *filp, unsigned int cmd,
if (migf->pre_copy_initial_bytes > *pos) {
info.initial_bytes = migf->pre_copy_initial_bytes - *pos;
} else {
- buf = mlx5vf_get_data_buff_from_pos(migf, *pos, &end_of_data);
- if (buf) {
- info.dirty_bytes = buf->start_pos + buf->length - *pos;
- } else {
- if (!end_of_data) {
- ret = -EINVAL;
- goto err_migf_unlock;
- }
- info.dirty_bytes = inc_length;
- }
+ info.dirty_bytes = migf->max_pos - *pos;
+ if (!info.dirty_bytes)
+ end_of_data = true;
+ info.dirty_bytes += inc_length;
}
if (!end_of_data || !inc_length) {
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index dc12dbd5b43b..7be9d9d8f01c 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -1169,6 +1169,7 @@ static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
err_attach:
iommu_domain_free(v->domain);
+ v->domain = NULL;
return ret;
}
@@ -1213,6 +1214,7 @@ static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
vhost_vdpa_remove_as(v, asid);
}
+ vhost_vdpa_free_domain(v);
vhost_dev_cleanup(&v->vdev);
kfree(v->vdev.vqs);
}
@@ -1285,7 +1287,6 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
vhost_vdpa_clean_irq(v);
vhost_vdpa_reset(v);
vhost_dev_stop(&v->vdev);
- vhost_vdpa_free_domain(v);
vhost_vdpa_config_put(v);
vhost_vdpa_cleanup(v);
mutex_unlock(&d->mutex);
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index f65c96d1394d..e45338227be6 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -854,7 +854,7 @@ static struct clcd_board *clcdfb_of_get_board(struct amba_device *dev)
board->caps = CLCD_CAP_ALL;
board->check = clcdfb_check;
board->decode = clcdfb_decode;
- if (of_find_property(node, "memory-region", NULL)) {
+ if (of_property_present(node, "memory-region")) {
board->setup = clcdfb_of_vram_setup;
board->mmap = clcdfb_of_vram_mmap;
board->remove = clcdfb_of_vram_remove;
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index 81c315454428..b6b22fa4a8a0 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1040,6 +1040,9 @@ static int au1200fb_fb_check_var(struct fb_var_screeninfo *var,
u32 pixclock;
int screen_size, plane;
+ if (!var->pixclock)
+ return -EINVAL;
+
plane = fbdev->plane;
/* Make sure that the mode respect all LCD controller and
diff --git a/drivers/video/fbdev/bw2.c b/drivers/video/fbdev/bw2.c
index 6403ae07970d..9cbadcd18b25 100644
--- a/drivers/video/fbdev/bw2.c
+++ b/drivers/video/fbdev/bw2.c
@@ -306,7 +306,7 @@ static int bw2_probe(struct platform_device *op)
if (!par->regs)
goto out_release_fb;
- if (!of_find_property(dp, "width", NULL)) {
+ if (!of_property_present(dp, "width")) {
err = bw2_do_default_mode(par, info, &linebytes);
if (err)
goto out_unmap_regs;
diff --git a/drivers/video/fbdev/cg3.c b/drivers/video/fbdev/cg3.c
index bdcc3f6ab666..3a37fff4df36 100644
--- a/drivers/video/fbdev/cg3.c
+++ b/drivers/video/fbdev/cg3.c
@@ -393,7 +393,7 @@ static int cg3_probe(struct platform_device *op)
cg3_blank(FB_BLANK_UNBLANK, info);
- if (!of_find_property(dp, "width", NULL)) {
+ if (!of_property_present(dp, "width")) {
err = cg3_do_default_mode(par);
if (err)
goto out_unmap_screen;
diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
index cc37ec3f8fc1..7799d52a651f 100644
--- a/drivers/video/fbdev/chipsfb.c
+++ b/drivers/video/fbdev/chipsfb.c
@@ -358,16 +358,21 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
if (rc)
return rc;
- if (pci_enable_device(dp) < 0) {
+ rc = pci_enable_device(dp);
+ if (rc < 0) {
dev_err(&dp->dev, "Cannot enable PCI device\n");
goto err_out;
}
- if ((dp->resource[0].flags & IORESOURCE_MEM) == 0)
+ if ((dp->resource[0].flags & IORESOURCE_MEM) == 0) {
+ rc = -ENODEV;
goto err_disable;
+ }
addr = pci_resource_start(dp, 0);
- if (addr == 0)
+ if (addr == 0) {
+ rc = -ENODEV;
goto err_disable;
+ }
p = framebuffer_alloc(0, &dp->dev);
if (p == NULL) {
@@ -417,7 +422,8 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
init_chips(p, addr);
- if (register_framebuffer(p) < 0) {
+ rc = register_framebuffer(p);
+ if (rc < 0) {
dev_err(&dp->dev,"C&T 65550 framebuffer failed to register\n");
goto err_unmap;
}
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
index 45c75ff01eca..c8bfc608bd9c 100644
--- a/drivers/video/fbdev/clps711x-fb.c
+++ b/drivers/video/fbdev/clps711x-fb.c
@@ -238,8 +238,7 @@ static int clps711x_fb_probe(struct platform_device *pdev)
info->fix.mmio_start = res->start;
info->fix.mmio_len = resource_size(res);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- info->screen_base = devm_ioremap_resource(dev, res);
+ info->screen_base = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
if (IS_ERR(info->screen_base)) {
ret = PTR_ERR(info->screen_base);
goto out_fb_release;
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index aa5f059d0222..274f5d0fa247 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -305,17 +305,18 @@ void fb_deferred_io_open(struct fb_info *info,
struct inode *inode,
struct file *file)
{
+ struct fb_deferred_io *fbdefio = info->fbdefio;
+
file->f_mapping->a_ops = &fb_deferred_io_aops;
+ fbdefio->open_count++;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);
-void fb_deferred_io_release(struct fb_info *info)
+static void fb_deferred_io_lastclose(struct fb_info *info)
{
- struct fb_deferred_io *fbdefio = info->fbdefio;
struct page *page;
int i;
- BUG_ON(!fbdefio);
cancel_delayed_work_sync(&info->deferred_work);
/* clear out the mapping that we setup */
@@ -324,13 +325,21 @@ void fb_deferred_io_release(struct fb_info *info)
page->mapping = NULL;
}
}
+
+void fb_deferred_io_release(struct fb_info *info)
+{
+ struct fb_deferred_io *fbdefio = info->fbdefio;
+
+ if (!--fbdefio->open_count)
+ fb_deferred_io_lastclose(info);
+}
EXPORT_SYMBOL_GPL(fb_deferred_io_release);
void fb_deferred_io_cleanup(struct fb_info *info)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
- fb_deferred_io_release(info);
+ fb_deferred_io_lastclose(info);
kvfree(info->pagerefs);
mutex_destroy(&fbdefio->lock);
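The open_count introduced above makes fb_deferred_io_release() a per-open operation and reserves the heavy teardown for the last closer; a minimal sketch of the counting pattern, assuming the counter is serialized by the fbdev open/release paths:

/* Sketch only: count opens, run the expensive teardown on last close. */
static void example_defio_open(struct fb_info *info)
{
	info->fbdefio->open_count++;
}

static void example_defio_release(struct fb_info *info)
{
	if (!--info->fbdefio->open_count)
		fb_deferred_io_lastclose(info);	/* only the final closer cleans up */
}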
diff --git a/drivers/video/fbdev/geode/lxfb_core.c b/drivers/video/fbdev/geode/lxfb_core.c
index 8130e9eee2b4..556d8b1a9e06 100644
--- a/drivers/video/fbdev/geode/lxfb_core.c
+++ b/drivers/video/fbdev/geode/lxfb_core.c
@@ -235,6 +235,9 @@ static void get_modedb(struct fb_videomode **modedb, unsigned int *size)
static int lxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
+ if (!var->pixclock)
+ return -EINVAL;
+
if (var->xres > 1920 || var->yres > 1440)
return -EINVAL;
diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
index 0a9e5067b201..a81095b2b1ea 100644
--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
+++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
@@ -1222,6 +1222,9 @@ static int intelfb_check_var(struct fb_var_screeninfo *var,
dinfo = GET_DINFO(info);
+ if (!var->pixclock)
+ return -EINVAL;
+
/* update the pitch */
if (intelfbhw_validate_mode(dinfo, var) != 0)
return -EINVAL;
diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
index e60a276b4855..ea4ba3dfb96b 100644
--- a/drivers/video/fbdev/nvidia/nvidia.c
+++ b/drivers/video/fbdev/nvidia/nvidia.c
@@ -764,6 +764,8 @@ static int nvidiafb_check_var(struct fb_var_screeninfo *var,
int pitch, err = 0;
NVTRACE_ENTER();
+ if (!var->pixclock)
+ return -EINVAL;
var->transp.offset = 0;
var->transp.length = 0;
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index f7ad6bc9d02d..b97d251d894b 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -549,10 +549,10 @@ static void offb_init_nodriver(struct platform_device *parent, struct device_nod
int foreign_endian = 0;
#ifdef __BIG_ENDIAN
- if (of_get_property(dp, "little-endian", NULL))
+ if (of_property_read_bool(dp, "little-endian"))
foreign_endian = FBINFO_FOREIGN_ENDIAN;
#else
- if (of_get_property(dp, "big-endian", NULL))
+ if (of_property_read_bool(dp, "big-endian"))
foreign_endian = FBINFO_FOREIGN_ENDIAN;
#endif
diff --git a/drivers/video/fbdev/omap/Makefile b/drivers/video/fbdev/omap/Makefile
index 504edb9c09dd..6d5082c76919 100644
--- a/drivers/video/fbdev/omap/Makefile
+++ b/drivers/video/fbdev/omap/Makefile
@@ -18,7 +18,6 @@ objs-y$(CONFIG_FB_OMAP_LCDC_HWA742) += hwa742.o
lcds-y$(CONFIG_MACH_AMS_DELTA) += lcd_ams_delta.o
lcds-y$(CONFIG_MACH_OMAP_PALMTE) += lcd_palmte.o
-lcds-y$(CONFIG_MACH_OMAP_OSK) += lcd_osk.o
lcds-y$(CONFIG_FB_OMAP_LCD_MIPID) += lcd_mipid.o
diff --git a/drivers/video/fbdev/omap/lcd_osk.c b/drivers/video/fbdev/omap/lcd_osk.c
deleted file mode 100644
index 8168ba0d47fd..000000000000
--- a/drivers/video/fbdev/omap/lcd_osk.c
+++ /dev/null
@@ -1,86 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * LCD panel support for the TI OMAP OSK board
- *
- * Copyright (C) 2004 Nokia Corporation
- * Author: Imre Deak <imre.deak@nokia.com>
- * Adapted for OSK by <dirk.behme@de.bosch.com>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-
-#include <linux/soc/ti/omap1-io.h>
-#include <linux/soc/ti/omap1-mux.h>
-
-#include "omapfb.h"
-
-static int osk_panel_enable(struct lcd_panel *panel)
-{
- /* configure PWL pin */
- omap_cfg_reg(PWL);
-
- /* Enable PWL unit */
- omap_writeb(0x01, OMAP_PWL_CLK_ENABLE);
-
- /* Set PWL level */
- omap_writeb(0xFF, OMAP_PWL_ENABLE);
-
- /* set GPIO2 high (lcd power enabled) */
- gpio_set_value(2, 1);
-
- return 0;
-}
-
-static void osk_panel_disable(struct lcd_panel *panel)
-{
- /* Set PWL level to zero */
- omap_writeb(0x00, OMAP_PWL_ENABLE);
-
- /* Disable PWL unit */
- omap_writeb(0x00, OMAP_PWL_CLK_ENABLE);
-
- /* set GPIO2 low */
- gpio_set_value(2, 0);
-}
-
-static struct lcd_panel osk_panel = {
- .name = "osk",
- .config = OMAP_LCDC_PANEL_TFT,
-
- .bpp = 16,
- .data_lines = 16,
- .x_res = 240,
- .y_res = 320,
- .pixel_clock = 12500,
- .hsw = 40,
- .hfp = 40,
- .hbp = 72,
- .vsw = 1,
- .vfp = 1,
- .vbp = 0,
- .pcd = 12,
-
- .enable = osk_panel_enable,
- .disable = osk_panel_disable,
-};
-
-static int osk_panel_probe(struct platform_device *pdev)
-{
- omapfb_register_panel(&osk_panel);
- return 0;
-}
-
-static struct platform_driver osk_panel_driver = {
- .probe = osk_panel_probe,
- .driver = {
- .name = "lcd_osk",
- },
-};
-
-module_platform_driver(osk_panel_driver);
-
-MODULE_AUTHOR("Imre Deak");
-MODULE_DESCRIPTION("LCD panel support for the TI OMAP OSK board");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 1f3df2055ff0..18736079843d 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -544,19 +544,25 @@ static int set_fb_var(struct fb_info *fbi,
var->yoffset = var->yres_virtual - var->yres;
if (plane->color_mode == OMAPFB_COLOR_RGB444) {
- var->red.offset = 8; var->red.length = 4;
- var->red.msb_right = 0;
- var->green.offset = 4; var->green.length = 4;
- var->green.msb_right = 0;
- var->blue.offset = 0; var->blue.length = 4;
- var->blue.msb_right = 0;
+ var->red.offset = 8;
+ var->red.length = 4;
+ var->red.msb_right = 0;
+ var->green.offset = 4;
+ var->green.length = 4;
+ var->green.msb_right = 0;
+ var->blue.offset = 0;
+ var->blue.length = 4;
+ var->blue.msb_right = 0;
} else {
- var->red.offset = 11; var->red.length = 5;
- var->red.msb_right = 0;
- var->green.offset = 5; var->green.length = 6;
- var->green.msb_right = 0;
- var->blue.offset = 0; var->blue.length = 5;
- var->blue.msb_right = 0;
+ var->red.offset = 11;
+ var->red.length = 5;
+ var->red.msb_right = 0;
+ var->green.offset = 5;
+ var->green.length = 6;
+ var->green.msb_right = 0;
+ var->blue.offset = 0;
+ var->blue.length = 5;
+ var->blue.msb_right = 0;
}
var->height = -1;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c b/drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
index 0ae0cab252d3..09f719af0d0c 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
@@ -192,7 +192,7 @@ static int __init omapdss_boot_init(void)
omapdss_walk_device(dss, true);
for_each_available_child_of_node(dss, child) {
- if (!of_find_property(child, "compatible", NULL))
+ if (!of_property_present(child, "compatible"))
continue;
omapdss_walk_device(child, true);
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index c3cd1e1cc01b..d16729215423 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -599,8 +599,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
priv->misc_dev.fops = &pxa3xx_gcu_miscdev_fops;
/* handle IO resources */
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->mmio_base = devm_ioremap_resource(dev, r);
+ priv->mmio_base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(priv->mmio_base))
return PTR_ERR(priv->mmio_base);
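
The two-step platform_get_resource() plus devm_ioremap_resource() sequence is folded into one helper here and again in the wm8505fb and xilinxfb hunks below; roughly (exampledrv_probe is a made-up name):

/* Sketch only: mapping MEM resource 0 of a platform device. */
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int exampledrv_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* looks up the resource, ioremaps it, and hands the resource back */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* devm_platform_ioremap_resource(pdev, 0) is the shorter form when
	 * the struct resource itself is not needed (see the wm8505fb hunk). */

	return 0;
}
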
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index f743bfbde2a6..1f3cbe723def 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -1737,10 +1737,10 @@ static int sm501fb_init_fb(struct fb_info *fb, enum sm501_controller head,
#if defined(CONFIG_OF)
#ifdef __BIG_ENDIAN
- if (of_get_property(info->dev->parent->of_node, "little-endian", NULL))
+ if (of_property_read_bool(info->dev->parent->of_node, "little-endian"))
fb->flags |= FBINFO_FOREIGN_ENDIAN;
#else
- if (of_get_property(info->dev->parent->of_node, "big-endian", NULL))
+ if (of_property_read_bool(info->dev->parent->of_node, "big-endian"))
fb->flags |= FBINFO_FOREIGN_ENDIAN;
#endif
#endif
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 3feb6e40d56d..ef8a4c5fc687 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -922,6 +922,28 @@ SETUP_HCRX(struct stifb_info *fb)
/* ------------------- driver specific functions --------------------------- */
static int
+stifb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ struct stifb_info *fb = container_of(info, struct stifb_info, info);
+
+ if (var->xres != fb->info.var.xres ||
+ var->yres != fb->info.var.yres ||
+ var->bits_per_pixel != fb->info.var.bits_per_pixel)
+ return -EINVAL;
+
+ var->xres_virtual = var->xres;
+ var->yres_virtual = var->yres;
+ var->xoffset = 0;
+ var->yoffset = 0;
+ var->grayscale = fb->info.var.grayscale;
+ var->red.length = fb->info.var.red.length;
+ var->green.length = fb->info.var.green.length;
+ var->blue.length = fb->info.var.blue.length;
+
+ return 0;
+}
+
+static int
stifb_setcolreg(u_int regno, u_int red, u_int green,
u_int blue, u_int transp, struct fb_info *info)
{
@@ -1145,6 +1167,7 @@ stifb_init_display(struct stifb_info *fb)
static const struct fb_ops stifb_ops = {
.owner = THIS_MODULE,
+ .fb_check_var = stifb_check_var,
.fb_setcolreg = stifb_setcolreg,
.fb_blank = stifb_blank,
.fb_fillrect = stifb_fillrect,
@@ -1164,6 +1187,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
struct stifb_info *fb;
struct fb_info *info;
unsigned long sti_rom_address;
+ char modestr[32];
char *dev_name;
int bpp, xres, yres;
@@ -1342,6 +1366,9 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
info->flags = FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
info->pseudo_palette = &fb->pseudo_palette;
+ scnprintf(modestr, sizeof(modestr), "%dx%d-%d", xres, yres, bpp);
+ fb_find_mode(&info->var, info, modestr, NULL, 0, NULL, bpp);
+
/* This has to be done !!! */
if (fb_alloc_cmap(&info->cmap, NR_PALETTE, 0))
goto out_err1;
diff --git a/drivers/video/fbdev/tcx.c b/drivers/video/fbdev/tcx.c
index 01d87f53324d..f2eaf6e7fff6 100644
--- a/drivers/video/fbdev/tcx.c
+++ b/drivers/video/fbdev/tcx.c
@@ -379,8 +379,7 @@ static int tcx_probe(struct platform_device *op)
spin_lock_init(&par->lock);
- par->lowdepth =
- (of_find_property(dp, "tcx-8-bit", NULL) != NULL);
+ par->lowdepth = of_property_read_bool(dp, "tcx-8-bit");
sbusfb_fill_var(&info->var, dp, 8);
info->var.red.length = 8;
diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c
index 14d37c49633c..b44004880f0d 100644
--- a/drivers/video/fbdev/tgafb.c
+++ b/drivers/video/fbdev/tgafb.c
@@ -173,6 +173,9 @@ tgafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct tga_par *par = (struct tga_par *)info->par;
+ if (!var->pixclock)
+ return -EINVAL;
+
if (par->tga_type == TGA_TYPE_8PLANE) {
if (var->bits_per_pixel != 8)
return -EINVAL;
diff --git a/drivers/video/fbdev/wm8505fb.c b/drivers/video/fbdev/wm8505fb.c
index 8f4d674fa0d0..96a6f7623e19 100644
--- a/drivers/video/fbdev/wm8505fb.c
+++ b/drivers/video/fbdev/wm8505fb.c
@@ -261,7 +261,6 @@ static const struct fb_ops wm8505fb_ops = {
static int wm8505fb_probe(struct platform_device *pdev)
{
struct wm8505fb_info *fbi;
- struct resource *res;
struct display_timings *disp_timing;
void *addr;
int ret;
@@ -299,8 +298,7 @@ static int wm8505fb_probe(struct platform_device *pdev)
addr = addr + sizeof(struct wm8505fb_info);
fbi->fb.pseudo_palette = addr;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fbi->regbase = devm_ioremap_resource(&pdev->dev, res);
+ fbi->regbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fbi->regbase))
return PTR_ERR(fbi->regbase);
diff --git a/drivers/video/fbdev/xilinxfb.c b/drivers/video/fbdev/xilinxfb.c
index 1ac83900a21c..7911354827dc 100644
--- a/drivers/video/fbdev/xilinxfb.c
+++ b/drivers/video/fbdev/xilinxfb.c
@@ -273,8 +273,7 @@ static int xilinxfb_assign(struct platform_device *pdev,
if (drvdata->flags & BUS_ACCESS_FLAG) {
struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- drvdata->regs = devm_ioremap_resource(&pdev->dev, res);
+ drvdata->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(drvdata->regs))
return PTR_ERR(drvdata->regs);
@@ -469,8 +468,7 @@ static int xilinxfb_of_probe(struct platform_device *pdev)
pdata.yvirt = prop[1];
}
- if (of_find_property(pdev->dev.of_node, "rotate-display", NULL))
- pdata.rotate_screen = 1;
+ pdata.rotate_screen = of_property_read_bool(pdev->dev.of_node, "rotate-display");
platform_set_drvdata(pdev, drvdata);
return xilinxfb_assign(pdev, drvdata, &pdata);
diff --git a/drivers/video/logo/pnmtologo.c b/drivers/video/logo/pnmtologo.c
index 4718d7895f0b..ada5ef6e51b7 100644
--- a/drivers/video/logo/pnmtologo.c
+++ b/drivers/video/logo/pnmtologo.c
@@ -1,15 +1,9 @@
-
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Convert a logo in ASCII PNM format to C source suitable for inclusion in
* the Linux kernel
*
* (C) Copyright 2001-2003 by Geert Uytterhoeven <geert@linux-m68k.org>
- *
- * --------------------------------------------------------------------------
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of the Linux
- * distribution for more details.
*/
#include <ctype.h>
@@ -34,37 +28,37 @@ static FILE *out;
#define LINUX_LOGO_GRAY256 4 /* 256 levels grayscale */
static const char *logo_types[LINUX_LOGO_GRAY256+1] = {
- [LINUX_LOGO_MONO] = "LINUX_LOGO_MONO",
- [LINUX_LOGO_VGA16] = "LINUX_LOGO_VGA16",
- [LINUX_LOGO_CLUT224] = "LINUX_LOGO_CLUT224",
- [LINUX_LOGO_GRAY256] = "LINUX_LOGO_GRAY256"
+ [LINUX_LOGO_MONO] = "LINUX_LOGO_MONO",
+ [LINUX_LOGO_VGA16] = "LINUX_LOGO_VGA16",
+ [LINUX_LOGO_CLUT224] = "LINUX_LOGO_CLUT224",
+ [LINUX_LOGO_GRAY256] = "LINUX_LOGO_GRAY256"
};
#define MAX_LINUX_LOGO_COLORS 224
struct color {
- unsigned char red;
- unsigned char green;
- unsigned char blue;
+ unsigned char red;
+ unsigned char green;
+ unsigned char blue;
};
static const struct color clut_vga16[16] = {
- { 0x00, 0x00, 0x00 },
- { 0x00, 0x00, 0xaa },
- { 0x00, 0xaa, 0x00 },
- { 0x00, 0xaa, 0xaa },
- { 0xaa, 0x00, 0x00 },
- { 0xaa, 0x00, 0xaa },
- { 0xaa, 0x55, 0x00 },
- { 0xaa, 0xaa, 0xaa },
- { 0x55, 0x55, 0x55 },
- { 0x55, 0x55, 0xff },
- { 0x55, 0xff, 0x55 },
- { 0x55, 0xff, 0xff },
- { 0xff, 0x55, 0x55 },
- { 0xff, 0x55, 0xff },
- { 0xff, 0xff, 0x55 },
- { 0xff, 0xff, 0xff },
+ { 0x00, 0x00, 0x00 },
+ { 0x00, 0x00, 0xaa },
+ { 0x00, 0xaa, 0x00 },
+ { 0x00, 0xaa, 0xaa },
+ { 0xaa, 0x00, 0x00 },
+ { 0xaa, 0x00, 0xaa },
+ { 0xaa, 0x55, 0x00 },
+ { 0xaa, 0xaa, 0xaa },
+ { 0x55, 0x55, 0x55 },
+ { 0x55, 0x55, 0xff },
+ { 0x55, 0xff, 0x55 },
+ { 0x55, 0xff, 0xff },
+ { 0xff, 0x55, 0x55 },
+ { 0xff, 0x55, 0xff },
+ { 0xff, 0xff, 0x55 },
+ { 0xff, 0xff, 0xff },
};
@@ -77,438 +71,440 @@ static unsigned int logo_clutsize;
static int is_plain_pbm = 0;
static void die(const char *fmt, ...)
- __attribute__ ((noreturn)) __attribute ((format (printf, 1, 2)));
-static void usage(void) __attribute ((noreturn));
+__attribute__((noreturn)) __attribute((format (printf, 1, 2)));
+static void usage(void) __attribute((noreturn));
static unsigned int get_number(FILE *fp)
{
- int c, val;
-
- /* Skip leading whitespace */
- do {
- c = fgetc(fp);
- if (c == EOF)
- die("%s: end of file\n", filename);
- if (c == '#') {
- /* Ignore comments 'till end of line */
- do {
+ int c, val;
+
+ /* Skip leading whitespace */
+ do {
c = fgetc(fp);
if (c == EOF)
- die("%s: end of file\n", filename);
- } while (c != '\n');
+ die("%s: end of file\n", filename);
+ if (c == '#') {
+ /* Ignore comments 'till end of line */
+ do {
+ c = fgetc(fp);
+ if (c == EOF)
+ die("%s: end of file\n", filename);
+ } while (c != '\n');
+ }
+ } while (isspace(c));
+
+ /* Parse decimal number */
+ val = 0;
+ while (isdigit(c)) {
+ val = 10*val+c-'0';
+ /* some PBM are 'broken'; GiMP for example exports a PBM without space
+ * between the digits. This is Ok cause we know a PBM can only have a '1'
+ * or a '0' for the digit.
+ */
+ if (is_plain_pbm)
+ break;
+ c = fgetc(fp);
+ if (c == EOF)
+ die("%s: end of file\n", filename);
}
- } while (isspace(c));
-
- /* Parse decimal number */
- val = 0;
- while (isdigit(c)) {
- val = 10*val+c-'0';
- /* some PBM are 'broken'; GiMP for example exports a PBM without space
- * between the digits. This is Ok cause we know a PBM can only have a '1'
- * or a '0' for the digit. */
- if (is_plain_pbm)
- break;
- c = fgetc(fp);
- if (c == EOF)
- die("%s: end of file\n", filename);
- }
- return val;
+ return val;
}
static unsigned int get_number255(FILE *fp, unsigned int maxval)
{
- unsigned int val = get_number(fp);
- return (255*val+maxval/2)/maxval;
+ unsigned int val = get_number(fp);
+
+ return (255*val+maxval/2)/maxval;
}
static void read_image(void)
{
- FILE *fp;
- unsigned int i, j;
- int magic;
- unsigned int maxval;
-
- /* open image file */
- fp = fopen(filename, "r");
- if (!fp)
- die("Cannot open file %s: %s\n", filename, strerror(errno));
-
- /* check file type and read file header */
- magic = fgetc(fp);
- if (magic != 'P')
- die("%s is not a PNM file\n", filename);
- magic = fgetc(fp);
- switch (magic) {
+ FILE *fp;
+ unsigned int i, j;
+ int magic;
+ unsigned int maxval;
+
+ /* open image file */
+ fp = fopen(filename, "r");
+ if (!fp)
+ die("Cannot open file %s: %s\n", filename, strerror(errno));
+
+ /* check file type and read file header */
+ magic = fgetc(fp);
+ if (magic != 'P')
+ die("%s is not a PNM file\n", filename);
+ magic = fgetc(fp);
+ switch (magic) {
case '1':
case '2':
case '3':
- /* Plain PBM/PGM/PPM */
- break;
+ /* Plain PBM/PGM/PPM */
+ break;
case '4':
case '5':
case '6':
- /* Binary PBM/PGM/PPM */
- die("%s: Binary PNM is not supported\n"
+ /* Binary PBM/PGM/PPM */
+ die("%s: Binary PNM is not supported\n"
"Use pnmnoraw(1) to convert it to ASCII PNM\n", filename);
default:
- die("%s is not a PNM file\n", filename);
- }
- logo_width = get_number(fp);
- logo_height = get_number(fp);
-
- /* allocate image data */
- logo_data = (struct color **)malloc(logo_height*sizeof(struct color *));
- if (!logo_data)
- die("%s\n", strerror(errno));
- for (i = 0; i < logo_height; i++) {
- logo_data[i] = malloc(logo_width*sizeof(struct color));
+ die("%s is not a PNM file\n", filename);
+ }
+ logo_width = get_number(fp);
+ logo_height = get_number(fp);
+
+ /* allocate image data */
+ logo_data = (struct color **)malloc(logo_height*sizeof(struct color *));
+ if (!logo_data)
+ die("%s\n", strerror(errno));
+ for (i = 0; i < logo_height; i++) {
+ logo_data[i] = malloc(logo_width*sizeof(struct color));
if (!logo_data[i])
- die("%s\n", strerror(errno));
- }
+ die("%s\n", strerror(errno));
+ }
- /* read image data */
- switch (magic) {
+ /* read image data */
+ switch (magic) {
case '1':
- /* Plain PBM */
- is_plain_pbm = 1;
- for (i = 0; i < logo_height; i++)
- for (j = 0; j < logo_width; j++)
- logo_data[i][j].red = logo_data[i][j].green =
- logo_data[i][j].blue = 255*(1-get_number(fp));
- break;
+ /* Plain PBM */
+ is_plain_pbm = 1;
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++)
+ logo_data[i][j].red = logo_data[i][j].green =
+ logo_data[i][j].blue = 255*(1-get_number(fp));
+ break;
case '2':
- /* Plain PGM */
- maxval = get_number(fp);
- for (i = 0; i < logo_height; i++)
- for (j = 0; j < logo_width; j++)
- logo_data[i][j].red = logo_data[i][j].green =
- logo_data[i][j].blue = get_number255(fp, maxval);
- break;
+ /* Plain PGM */
+ maxval = get_number(fp);
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++)
+ logo_data[i][j].red = logo_data[i][j].green =
+ logo_data[i][j].blue = get_number255(fp, maxval);
+ break;
case '3':
- /* Plain PPM */
- maxval = get_number(fp);
- for (i = 0; i < logo_height; i++)
- for (j = 0; j < logo_width; j++) {
- logo_data[i][j].red = get_number255(fp, maxval);
- logo_data[i][j].green = get_number255(fp, maxval);
- logo_data[i][j].blue = get_number255(fp, maxval);
- }
- break;
- }
+ /* Plain PPM */
+ maxval = get_number(fp);
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++) {
+ logo_data[i][j].red = get_number255(fp, maxval);
+ logo_data[i][j].green = get_number255(fp, maxval);
+ logo_data[i][j].blue = get_number255(fp, maxval);
+ }
+ break;
+ }
- /* close file */
- fclose(fp);
+ /* close file */
+ fclose(fp);
}
static inline int is_black(struct color c)
{
- return c.red == 0 && c.green == 0 && c.blue == 0;
+ return c.red == 0 && c.green == 0 && c.blue == 0;
}
static inline int is_white(struct color c)
{
- return c.red == 255 && c.green == 255 && c.blue == 255;
+ return c.red == 255 && c.green == 255 && c.blue == 255;
}
static inline int is_gray(struct color c)
{
- return c.red == c.green && c.red == c.blue;
+ return c.red == c.green && c.red == c.blue;
}
static inline int is_equal(struct color c1, struct color c2)
{
- return c1.red == c2.red && c1.green == c2.green && c1.blue == c2.blue;
+ return c1.red == c2.red && c1.green == c2.green && c1.blue == c2.blue;
}
static void write_header(void)
{
- /* open logo file */
- if (outputname) {
- out = fopen(outputname, "w");
- if (!out)
- die("Cannot create file %s: %s\n", outputname, strerror(errno));
- } else {
- out = stdout;
- }
-
- fputs("/*\n", out);
- fputs(" * DO NOT EDIT THIS FILE!\n", out);
- fputs(" *\n", out);
- fprintf(out, " * It was automatically generated from %s\n", filename);
- fputs(" *\n", out);
- fprintf(out, " * Linux logo %s\n", logoname);
- fputs(" */\n\n", out);
- fputs("#include <linux/linux_logo.h>\n\n", out);
- fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
- logoname);
+ /* open logo file */
+ if (outputname) {
+ out = fopen(outputname, "w");
+ if (!out)
+ die("Cannot create file %s: %s\n", outputname, strerror(errno));
+ } else {
+ out = stdout;
+ }
+
+ fputs("/*\n", out);
+ fputs(" * DO NOT EDIT THIS FILE!\n", out);
+ fputs(" *\n", out);
+ fprintf(out, " * It was automatically generated from %s\n", filename);
+ fputs(" *\n", out);
+ fprintf(out, " * Linux logo %s\n", logoname);
+ fputs(" */\n\n", out);
+ fputs("#include <linux/linux_logo.h>\n\n", out);
+ fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
+ logoname);
}
static void write_footer(void)
{
- fputs("\n};\n\n", out);
- fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
- fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
- fprintf(out, "\t.width\t\t= %d,\n", logo_width);
- fprintf(out, "\t.height\t\t= %d,\n", logo_height);
- if (logo_type == LINUX_LOGO_CLUT224) {
- fprintf(out, "\t.clutsize\t= %d,\n", logo_clutsize);
- fprintf(out, "\t.clut\t\t= %s_clut,\n", logoname);
- }
- fprintf(out, "\t.data\t\t= %s_data\n", logoname);
- fputs("};\n\n", out);
-
- /* close logo file */
- if (outputname)
- fclose(out);
+ fputs("\n};\n\n", out);
+ fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
+ fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
+ fprintf(out, "\t.width\t\t= %d,\n", logo_width);
+ fprintf(out, "\t.height\t\t= %d,\n", logo_height);
+ if (logo_type == LINUX_LOGO_CLUT224) {
+ fprintf(out, "\t.clutsize\t= %d,\n", logo_clutsize);
+ fprintf(out, "\t.clut\t\t= %s_clut,\n", logoname);
+ }
+ fprintf(out, "\t.data\t\t= %s_data\n", logoname);
+ fputs("};\n\n", out);
+
+ /* close logo file */
+ if (outputname)
+ fclose(out);
}
static int write_hex_cnt;
static void write_hex(unsigned char byte)
{
- if (write_hex_cnt % 12)
- fprintf(out, ", 0x%02x", byte);
- else if (write_hex_cnt)
- fprintf(out, ",\n\t0x%02x", byte);
- else
- fprintf(out, "\t0x%02x", byte);
- write_hex_cnt++;
+ if (write_hex_cnt % 12)
+ fprintf(out, ", 0x%02x", byte);
+ else if (write_hex_cnt)
+ fprintf(out, ",\n\t0x%02x", byte);
+ else
+ fprintf(out, "\t0x%02x", byte);
+ write_hex_cnt++;
}
static void write_logo_mono(void)
{
- unsigned int i, j;
- unsigned char val, bit;
-
- /* validate image */
- for (i = 0; i < logo_height; i++)
- for (j = 0; j < logo_width; j++)
- if (!is_black(logo_data[i][j]) && !is_white(logo_data[i][j]))
- die("Image must be monochrome\n");
-
- /* write file header */
- write_header();
-
- /* write logo data */
- for (i = 0; i < logo_height; i++) {
- for (j = 0; j < logo_width;) {
- for (val = 0, bit = 0x80; bit && j < logo_width; j++, bit >>= 1)
- if (logo_data[i][j].red)
- val |= bit;
- write_hex(val);
+ unsigned int i, j;
+ unsigned char val, bit;
+
+ /* validate image */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++)
+ if (!is_black(logo_data[i][j]) && !is_white(logo_data[i][j]))
+ die("Image must be monochrome\n");
+
+ /* write file header */
+ write_header();
+
+ /* write logo data */
+ for (i = 0; i < logo_height; i++) {
+ for (j = 0; j < logo_width;) {
+ for (val = 0, bit = 0x80; bit && j < logo_width; j++, bit >>= 1)
+ if (logo_data[i][j].red)
+ val |= bit;
+ write_hex(val);
+ }
}
- }
- /* write logo structure and file footer */
- write_footer();
+ /* write logo structure and file footer */
+ write_footer();
}
static void write_logo_vga16(void)
{
- unsigned int i, j, k;
- unsigned char val;
-
- /* validate image */
- for (i = 0; i < logo_height; i++)
- for (j = 0; j < logo_width; j++) {
- for (k = 0; k < 16; k++)
- if (is_equal(logo_data[i][j], clut_vga16[k]))
- break;
- if (k == 16)
- die("Image must use the 16 console colors only\n"
- "Use ppmquant(1) -map clut_vga16.ppm to reduce the number "
- "of colors\n");
- }
+ unsigned int i, j, k;
+ unsigned char val;
- /* write file header */
- write_header();
-
- /* write logo data */
- for (i = 0; i < logo_height; i++)
- for (j = 0; j < logo_width; j++) {
- for (k = 0; k < 16; k++)
- if (is_equal(logo_data[i][j], clut_vga16[k]))
- break;
- val = k<<4;
- if (++j < logo_width) {
- for (k = 0; k < 16; k++)
- if (is_equal(logo_data[i][j], clut_vga16[k]))
- break;
- val |= k;
- }
- write_hex(val);
- }
+ /* validate image */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++) {
+ for (k = 0; k < 16; k++)
+ if (is_equal(logo_data[i][j], clut_vga16[k]))
+ break;
+ if (k == 16)
+ die("Image must use the 16 console colors only\n"
+ "Use ppmquant(1) -map clut_vga16.ppm to reduce the number "
+ "of colors\n");
+ }
- /* write logo structure and file footer */
- write_footer();
+ /* write file header */
+ write_header();
+
+ /* write logo data */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++) {
+ for (k = 0; k < 16; k++)
+ if (is_equal(logo_data[i][j], clut_vga16[k]))
+ break;
+ val = k<<4;
+ if (++j < logo_width) {
+ for (k = 0; k < 16; k++)
+ if (is_equal(logo_data[i][j], clut_vga16[k]))
+ break;
+ val |= k;
+ }
+ write_hex(val);
+ }
+
+ /* write logo structure and file footer */
+ write_footer();
}
static void write_logo_clut224(void)
{
- unsigned int i, j, k;
-
- /* validate image */
- for (i = 0; i < logo_height; i++)
- for (j = 0; j < logo_width; j++) {
- for (k = 0; k < logo_clutsize; k++)
- if (is_equal(logo_data[i][j], logo_clut[k]))
- break;
- if (k == logo_clutsize) {
- if (logo_clutsize == MAX_LINUX_LOGO_COLORS)
- die("Image has more than %d colors\n"
- "Use ppmquant(1) to reduce the number of colors\n",
- MAX_LINUX_LOGO_COLORS);
- logo_clut[logo_clutsize++] = logo_data[i][j];
- }
- }
+ unsigned int i, j, k;
- /* write file header */
- write_header();
+ /* validate image */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++) {
+ for (k = 0; k < logo_clutsize; k++)
+ if (is_equal(logo_data[i][j], logo_clut[k]))
+ break;
+ if (k == logo_clutsize) {
+ if (logo_clutsize == MAX_LINUX_LOGO_COLORS)
+ die("Image has more than %d colors\n"
+ "Use ppmquant(1) to reduce the number of colors\n",
+ MAX_LINUX_LOGO_COLORS);
+ logo_clut[logo_clutsize++] = logo_data[i][j];
+ }
+ }
- /* write logo data */
- for (i = 0; i < logo_height; i++)
- for (j = 0; j < logo_width; j++) {
- for (k = 0; k < logo_clutsize; k++)
- if (is_equal(logo_data[i][j], logo_clut[k]))
- break;
- write_hex(k+32);
+ /* write file header */
+ write_header();
+
+ /* write logo data */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++) {
+ for (k = 0; k < logo_clutsize; k++)
+ if (is_equal(logo_data[i][j], logo_clut[k]))
+ break;
+ write_hex(k+32);
+ }
+ fputs("\n};\n\n", out);
+
+ /* write logo clut */
+ fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
+ logoname);
+ write_hex_cnt = 0;
+ for (i = 0; i < logo_clutsize; i++) {
+ write_hex(logo_clut[i].red);
+ write_hex(logo_clut[i].green);
+ write_hex(logo_clut[i].blue);
}
- fputs("\n};\n\n", out);
-
- /* write logo clut */
- fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
- logoname);
- write_hex_cnt = 0;
- for (i = 0; i < logo_clutsize; i++) {
- write_hex(logo_clut[i].red);
- write_hex(logo_clut[i].green);
- write_hex(logo_clut[i].blue);
- }
-
- /* write logo structure and file footer */
- write_footer();
+
+ /* write logo structure and file footer */
+ write_footer();
}
static void write_logo_gray256(void)
{
- unsigned int i, j;
+ unsigned int i, j;
- /* validate image */
- for (i = 0; i < logo_height; i++)
- for (j = 0; j < logo_width; j++)
- if (!is_gray(logo_data[i][j]))
- die("Image must be grayscale\n");
+ /* validate image */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++)
+ if (!is_gray(logo_data[i][j]))
+ die("Image must be grayscale\n");
- /* write file header */
- write_header();
+ /* write file header */
+ write_header();
- /* write logo data */
- for (i = 0; i < logo_height; i++)
- for (j = 0; j < logo_width; j++)
- write_hex(logo_data[i][j].red);
+ /* write logo data */
+ for (i = 0; i < logo_height; i++)
+ for (j = 0; j < logo_width; j++)
+ write_hex(logo_data[i][j].red);
- /* write logo structure and file footer */
- write_footer();
+ /* write logo structure and file footer */
+ write_footer();
}
static void die(const char *fmt, ...)
{
- va_list ap;
+ va_list ap;
- va_start(ap, fmt);
- vfprintf(stderr, fmt, ap);
- va_end(ap);
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
- exit(1);
+ exit(1);
}
static void usage(void)
{
- die("\n"
+ die("\n"
"Usage: %s [options] <filename>\n"
"\n"
"Valid options:\n"
- " -h : display this usage information\n"
- " -n <name> : specify logo name (default: linux_logo)\n"
- " -o <output> : output to file <output> instead of stdout\n"
- " -t <type> : specify logo type, one of\n"
- " mono : monochrome black/white\n"
- " vga16 : 16 colors VGA text palette\n"
- " clut224 : 224 colors (default)\n"
- " gray256 : 256 levels grayscale\n"
+ " -h : display this usage information\n"
+ " -n <name> : specify logo name (default: linux_logo)\n"
+ " -o <output> : output to file <output> instead of stdout\n"
+ " -t <type> : specify logo type, one of\n"
+ " mono : monochrome black/white\n"
+ " vga16 : 16 colors VGA text palette\n"
+ " clut224 : 224 colors (default)\n"
+ " gray256 : 256 levels grayscale\n"
"\n", programname);
}
int main(int argc, char *argv[])
{
- int opt;
+ int opt;
- programname = argv[0];
+ programname = argv[0];
- opterr = 0;
- while (1) {
- opt = getopt(argc, argv, "hn:o:t:");
- if (opt == -1)
- break;
+ opterr = 0;
+ while (1) {
+ opt = getopt(argc, argv, "hn:o:t:");
+ if (opt == -1)
+ break;
- switch (opt) {
- case 'h':
- usage();
- break;
+ switch (opt) {
+ case 'h':
+ usage();
+ break;
- case 'n':
- logoname = optarg;
- break;
+ case 'n':
+ logoname = optarg;
+ break;
- case 'o':
- outputname = optarg;
- break;
+ case 'o':
+ outputname = optarg;
+ break;
- case 't':
- if (!strcmp(optarg, "mono"))
- logo_type = LINUX_LOGO_MONO;
- else if (!strcmp(optarg, "vga16"))
- logo_type = LINUX_LOGO_VGA16;
- else if (!strcmp(optarg, "clut224"))
- logo_type = LINUX_LOGO_CLUT224;
- else if (!strcmp(optarg, "gray256"))
- logo_type = LINUX_LOGO_GRAY256;
- else
- usage();
- break;
+ case 't':
+ if (!strcmp(optarg, "mono"))
+ logo_type = LINUX_LOGO_MONO;
+ else if (!strcmp(optarg, "vga16"))
+ logo_type = LINUX_LOGO_VGA16;
+ else if (!strcmp(optarg, "clut224"))
+ logo_type = LINUX_LOGO_CLUT224;
+ else if (!strcmp(optarg, "gray256"))
+ logo_type = LINUX_LOGO_GRAY256;
+ else
+ usage();
+ break;
- default:
- usage();
- break;
+ default:
+ usage();
+ break;
+ }
}
- }
- if (optind != argc-1)
- usage();
+ if (optind != argc-1)
+ usage();
- filename = argv[optind];
+ filename = argv[optind];
- read_image();
- switch (logo_type) {
+ read_image();
+ switch (logo_type) {
case LINUX_LOGO_MONO:
- write_logo_mono();
- break;
+ write_logo_mono();
+ break;
case LINUX_LOGO_VGA16:
- write_logo_vga16();
- break;
+ write_logo_vga16();
+ break;
case LINUX_LOGO_CLUT224:
- write_logo_clut224();
- break;
+ write_logo_clut224();
+ break;
case LINUX_LOGO_GRAY256:
- write_logo_gray256();
- break;
- }
- exit(0);
+ write_logo_gray256();
+ break;
+ }
+ exit(0);
}
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index 7b4e9009f335..46f1a8d558b0 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -31,6 +31,9 @@
#define AAD_LEN 48
#define MSG_HDR_VER 1
+#define SNP_REQ_MAX_RETRY_DURATION (60*HZ)
+#define SNP_REQ_RETRY_DELAY (2*HZ)
+
struct snp_guest_crypto {
struct crypto_aead *tfm;
u8 *iv, *authtag;
@@ -318,26 +321,14 @@ static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8
return __enc_payload(snp_dev, req, payload, sz);
}
-static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, int msg_ver,
- u8 type, void *req_buf, size_t req_sz, void *resp_buf,
- u32 resp_sz, __u64 *fw_err)
+static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, __u64 *fw_err)
{
- unsigned long err;
- u64 seqno;
+ unsigned long err = 0xff, override_err = 0;
+ unsigned long req_start = jiffies;
+ unsigned int override_npages = 0;
int rc;
- /* Get message sequence and verify that its a non-zero */
- seqno = snp_get_msg_seqno(snp_dev);
- if (!seqno)
- return -EIO;
-
- memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));
-
- /* Encrypt the userspace provided payload */
- rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz);
- if (rc)
- return rc;
-
+retry_request:
/*
* Call firmware to process the request. In this function the encrypted
* message enters shared memory with the host. So after this call the
@@ -345,18 +336,24 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
* prevent reuse of the IV.
*/
rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
+ switch (rc) {
+ case -ENOSPC:
+ /*
+ * If the extended guest request fails due to having too
+ * small of a certificate data buffer, retry the same
+ * guest request without the extended data request in
+ * order to increment the sequence number and thus avoid
+ * IV reuse.
+ */
+ override_npages = snp_dev->input.data_npages;
+ exit_code = SVM_VMGEXIT_GUEST_REQUEST;
- /*
- * If the extended guest request fails due to having too small of a
- * certificate data buffer, retry the same guest request without the
- * extended data request in order to increment the sequence number
- * and thus avoid IV reuse.
- */
- if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
- err == SNP_GUEST_REQ_INVALID_LEN) {
- const unsigned int certs_npages = snp_dev->input.data_npages;
-
- exit_code = SVM_VMGEXIT_GUEST_REQUEST;
+ /*
+ * Override the error to inform callers the given extended
+ * request buffer size was too small and give the caller the
+ * required buffer size.
+ */
+ override_err = SNP_GUEST_REQ_INVALID_LEN;
/*
* If this call to the firmware succeeds, the sequence number can
@@ -366,15 +363,20 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
* of the VMPCK and the error code being propagated back to the
* user as an ioctl() return code.
*/
- rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
+ goto retry_request;
- /*
- * Override the error to inform callers the given extended
- * request buffer size was too small and give the caller the
- * required buffer size.
- */
- err = SNP_GUEST_REQ_INVALID_LEN;
- snp_dev->input.data_npages = certs_npages;
+ /*
+ * The host may return SNP_GUEST_REQ_ERR_EBUSY if the request has been
+ * throttled. Retry in the driver to avoid returning and reusing the
+ * message sequence number on a different message.
+ */
+ case -EAGAIN:
+ if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
+ rc = -ETIMEDOUT;
+ break;
+ }
+ schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
+ goto retry_request;
}
/*
@@ -386,7 +388,10 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
snp_inc_msg_seqno(snp_dev);
if (fw_err)
- *fw_err = err;
+ *fw_err = override_err ?: err;
+
+ if (override_npages)
+ snp_dev->input.data_npages = override_npages;
/*
* If an extended guest request was issued and the supplied certificate
@@ -394,29 +399,49 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
* prevent IV reuse. If the standard request was successful, return -EIO
* back to the caller as would have originally been returned.
*/
- if (!rc && err == SNP_GUEST_REQ_INVALID_LEN)
+ if (!rc && override_err == SNP_GUEST_REQ_INVALID_LEN)
+ return -EIO;
+
+ return rc;
+}
+
+static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, int msg_ver,
+ u8 type, void *req_buf, size_t req_sz, void *resp_buf,
+ u32 resp_sz, __u64 *fw_err)
+{
+ u64 seqno;
+ int rc;
+
+ /* Get message sequence and verify that its a non-zero */
+ seqno = snp_get_msg_seqno(snp_dev);
+ if (!seqno)
return -EIO;
+ memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));
+
+ /* Encrypt the userspace provided payload */
+ rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz);
+ if (rc)
+ return rc;
+
+ rc = __handle_guest_request(snp_dev, exit_code, fw_err);
if (rc) {
- dev_alert(snp_dev->dev,
- "Detected error from ASP request. rc: %d, fw_err: %llu\n",
- rc, *fw_err);
- goto disable_vmpck;
+ if (rc == -EIO && *fw_err == SNP_GUEST_REQ_INVALID_LEN)
+ return rc;
+
+ dev_alert(snp_dev->dev, "Detected error from ASP request. rc: %d, fw_err: %llu\n", rc, *fw_err);
+ snp_disable_vmpck(snp_dev);
+ return rc;
}
rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz);
if (rc) {
- dev_alert(snp_dev->dev,
- "Detected unexpected decode failure from ASP. rc: %d\n",
- rc);
- goto disable_vmpck;
+ dev_alert(snp_dev->dev, "Detected unexpected decode failure from ASP. rc: %d\n", rc);
+ snp_disable_vmpck(snp_dev);
+ return rc;
}
return 0;
-
-disable_vmpck:
- snp_disable_vmpck(snp_dev);
- return rc;
}
static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
@@ -703,6 +728,9 @@ static int __init sev_guest_probe(struct platform_device *pdev)
void __iomem *mapping;
int ret;
+ if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ return -ENODEV;
+
if (!dev->platform_data)
return -ENODEV;
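
The -EAGAIN handling added above retries a throttled request but bounds the retries with a wall-clock deadline. Stripped of the SNP specifics (all names below are invented), the underlying jiffies idiom is roughly:

/* Sketch only: bounded retry of a throttled request. */
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/errno.h>

#define EXAMPLE_MAX_RETRY_DURATION	(60 * HZ)	/* give up after ~60s */
#define EXAMPLE_RETRY_DELAY		(2 * HZ)	/* sleep ~2s between tries */

static int example_issue_with_retry(int (*issue)(void))
{
	unsigned long start = jiffies;
	int rc;

retry:
	rc = issue();
	if (rc == -EAGAIN) {
		if (time_after(jiffies, start + EXAMPLE_MAX_RETRY_DURATION))
			return -ETIMEDOUT;
		schedule_timeout_killable(EXAMPLE_RETRY_DELAY);
		goto retry;
	}
	return rc;
}
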
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index 62c44616d8a9..3d8b51316bef 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -442,8 +442,7 @@ static u8 ds2482_w1_set_pullup(void *data, int delay)
}
-static int ds2482_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ds2482_probe(struct i2c_client *client)
{
struct ds2482_data *data;
int err = -ENODEV;
@@ -553,7 +552,7 @@ static struct i2c_driver ds2482_driver = {
.driver = {
.name = "ds2482",
},
- .probe = ds2482_probe,
+ .probe_new = ds2482_probe,
.remove = ds2482_remove,
.id_table = ds2482_id,
};
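
The ds2482 change above is part of the tree-wide switch from the two-argument i2c probe() callback to probe_new(), which drops the i2c_device_id parameter; a minimal sketch of a converted driver (all exampledev names are invented) is:

/* Sketch only: an i2c driver using the probe_new() callback. */
#include <linux/i2c.h>
#include <linux/module.h>

static int exampledev_probe(struct i2c_client *client)
{
	/* the matching id table entry, if needed, now comes from
	 * i2c_client_get_device_id(client) rather than a parameter */
	return 0;
}

static const struct i2c_device_id exampledev_id[] = {
	{ "exampledev", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, exampledev_id);

static struct i2c_driver exampledev_driver = {
	.driver = {
		.name = "exampledev",
	},
	.probe_new = exampledev_probe,
	.id_table = exampledev_id,
};
module_i2c_driver(exampledev_driver);

MODULE_LICENSE("GPL");
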
diff --git a/drivers/xen/xenfs/xensyms.c b/drivers/xen/xenfs/xensyms.c
index c6c73a33c44d..b799bc759c15 100644
--- a/drivers/xen/xenfs/xensyms.c
+++ b/drivers/xen/xenfs/xensyms.c
@@ -64,7 +64,7 @@ static int xensyms_next_sym(struct xensyms *xs)
static void *xensyms_start(struct seq_file *m, loff_t *pos)
{
- struct xensyms *xs = (struct xensyms *)m->private;
+ struct xensyms *xs = m->private;
xs->op.u.symdata.symnum = *pos;
@@ -76,7 +76,7 @@ static void *xensyms_start(struct seq_file *m, loff_t *pos)
static void *xensyms_next(struct seq_file *m, void *p, loff_t *pos)
{
- struct xensyms *xs = (struct xensyms *)m->private;
+ struct xensyms *xs = m->private;
xs->op.u.symdata.symnum = ++(*pos);
@@ -88,7 +88,7 @@ static void *xensyms_next(struct seq_file *m, void *p, loff_t *pos)
static int xensyms_show(struct seq_file *m, void *p)
{
- struct xensyms *xs = (struct xensyms *)m->private;
+ struct xensyms *xs = m->private;
struct xenpf_symdata *symdata = &xs->op.u.symdata;
seq_printf(m, "%016llx %c %s\n", symdata->address,
@@ -120,7 +120,7 @@ static int xensyms_open(struct inode *inode, struct file *file)
return ret;
m = file->private_data;
- xs = (struct xensyms *)m->private;
+ xs = m->private;
xs->namelen = XEN_KSYM_NAME_LEN + 1;
xs->name = kzalloc(xs->namelen, GFP_KERNEL);
@@ -138,7 +138,7 @@ static int xensyms_open(struct inode *inode, struct file *file)
static int xensyms_release(struct inode *inode, struct file *file)
{
struct seq_file *m = file->private_data;
- struct xensyms *xs = (struct xensyms *)m->private;
+ struct xensyms *xs = m->private;
kfree(xs->name);
return seq_release_private(inode, file);